Dataset schema (column: type, observed range):

  problem_id         string, 18 to 22 chars
  source             string, 1 distinct value
  task_type          string, 1 distinct value
  in_source_id       string, 13 to 58 chars
  prompt             string, 1.71k to 9.01k chars
  golden_diff        string, 151 to 4.94k chars
  verification_info  string, 465 to 11.3k chars
  num_tokens_prompt  int64, 557 to 2.05k
  num_tokens_diff    int64, 48 to 1.02k
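The rows below are flattened records of this dataset. As a quick orientation, here is a minimal loading sketch; the Hub id `rasdani/github-patches` is copied from the `source` column and the `train` split name is a guess, so both are assumptions rather than confirmed locations. The `verification_info` key names are taken from the rows themselves.

```python
# Minimal loading sketch. Assumptions: the Hub id (copied from the
# `source` column) and the split name may differ from where the data
# actually lives.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"], row["num_tokens_prompt"])

# verification_info is a JSON string holding the reference patch, the
# issue text, and the pre-patch file contents.
info = json.loads(row["verification_info"])
print(sorted(info))  # expected keys: before_files, golden_diff, issue
for f in info["before_files"]:
    print(f["path"], len(f["content"]))
```

Each record follows the same shape: identifiers, a prompt that embeds the issue and the pre-patch source, the reference diff, the JSON verification payload, and two token counts.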
gh_patches_debug_32456
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-217
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create physics exceptions and warnings Several of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`. This would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you! </issue> <code> [start of plasmapy/utils/exceptions.py] 1 """ 2 Custom Error and Warning names to improve readability 3 """ 4 5 6 # ---------- 7 # Exceptions: 8 # ---------- 9 10 class PlasmaPyError(Exception): 11 """ 12 Base class of PlasmaPy custom errors. 13 14 All custom exceptions raised by PlasmaPy should inherit from this class 15 and be defined in this module. 16 17 Custom exceptions can inherit from other exception types too. Thus, if code 18 already knows how to handle a ValueError, it won't need any specific 19 modification. 20 """ 21 22 23 class PhysicsError(PlasmaPyError, ValueError): 24 """Error for use of a physics value outside PlasmaPy theoretical bounds""" 25 26 27 class RelativityError(PhysicsError): 28 """Error for use of a speed greater than or equal to the speed of light""" 29 30 31 # ---------- 32 # Warnings: 33 # ---------- 34 35 class PlasmaPyWarning(Warning): 36 """Base class of PlasmaPy custom warnings. 37 38 All PlasmaPy custom warnings should inherit from this class and be defined 39 in this module. 40 41 Warnings should be issued using warnings.warn, which will not break 42 execution if unhandled. 43 """ 44 45 46 class PhysicsWarning(PlasmaPyWarning): 47 """Warning for using a mildly worrisome physics value""" 48 49 50 class RelativityWarning(PhysicsWarning): 51 """Warning for use of a speed quantity approaching the speed of light""" 52 [end of plasmapy/utils/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py --- a/plasmapy/utils/exceptions.py +++ b/plasmapy/utils/exceptions.py @@ -18,14 +18,51 @@ already knows how to handle a ValueError, it won't need any specific modification. """ + pass class PhysicsError(PlasmaPyError, ValueError): """Error for use of a physics value outside PlasmaPy theoretical bounds""" + pass class RelativityError(PhysicsError): """Error for use of a speed greater than or equal to the speed of light""" + pass + + +class AtomicError(PlasmaPyError): + """Error for use by an atomic subpackage""" + pass + + +class MissingAtomicDataError(AtomicError): + """Error for use when atomic data is missing.""" + pass + + +class NoChargeInfoError(AtomicError): + """Error for use when charge information is needed but missing.""" + + +class IonError(NoChargeInfoError): + """Error for use when an ion is invalid.""" + pass + + +class IsotopeError(AtomicError): + """Error for use when an isotope is invalid.""" + pass + + +class ElementError(IsotopeError, IonError): + """Error for use when an element is invalid.""" + pass + + +class ParticleError(ElementError): + """Error for use when a particle is invalid.""" + pass # ---------- @@ -41,11 +78,24 @@ Warnings should be issued using warnings.warn, which will not break execution if unhandled. """ + pass class PhysicsWarning(PlasmaPyWarning): """Warning for using a mildly worrisome physics value""" + pass class RelativityWarning(PhysicsWarning): """Warning for use of a speed quantity approaching the speed of light""" + pass + + +class AtomicWarning(PlasmaPyWarning): + """Warnings for use in the atomic subpackage.""" + pass + + +class MissingAtomicDataWarning(AtomicWarning): + """Warning for use when atomic data is missing.""" + pass
{"golden_diff": "diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py\n--- a/plasmapy/utils/exceptions.py\n+++ b/plasmapy/utils/exceptions.py\n@@ -18,14 +18,51 @@\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n+ pass\n \n \n class PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n+ pass\n \n \n class RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicError(PlasmaPyError):\n+ \"\"\"Error for use by an atomic subpackage\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataError(AtomicError):\n+ \"\"\"Error for use when atomic data is missing.\"\"\"\n+ pass\n+\n+\n+class NoChargeInfoError(AtomicError):\n+ \"\"\"Error for use when charge information is needed but missing.\"\"\"\n+\n+\n+class IonError(NoChargeInfoError):\n+ \"\"\"Error for use when an ion is invalid.\"\"\"\n+ pass\n+\n+\n+class IsotopeError(AtomicError):\n+ \"\"\"Error for use when an isotope is invalid.\"\"\"\n+ pass\n+\n+\n+class ElementError(IsotopeError, IonError):\n+ \"\"\"Error for use when an element is invalid.\"\"\"\n+ pass\n+\n+\n+class ParticleError(ElementError):\n+ \"\"\"Error for use when a particle is invalid.\"\"\"\n+ pass\n \n \n # ----------\n@@ -41,11 +78,24 @@\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n+ pass\n \n \n class PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n+ pass\n \n \n class RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicWarning(PlasmaPyWarning):\n+ \"\"\"Warnings for use in the atomic subpackage.\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataWarning(AtomicWarning):\n+ \"\"\"Warning for use when atomic data is missing.\"\"\"\n+ pass\n", "issue": "Create physics exceptions and warnings\nSeveral of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`.\r\n\r\nThis would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you!\r\n\n", "before_files": [{"content": "\"\"\"\nCustom Error and Warning names to improve readability\n\"\"\"\n\n\n# ----------\n# Exceptions:\n# ----------\n\nclass PlasmaPyError(Exception):\n \"\"\"\n Base class of PlasmaPy custom errors.\n\n All custom exceptions raised by PlasmaPy should inherit from this class\n and be defined in this module.\n\n Custom exceptions can inherit from other exception types too. 
Thus, if code\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n\n\nclass PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n\n\nclass RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n\n\n# ----------\n# Warnings:\n# ----------\n\nclass PlasmaPyWarning(Warning):\n \"\"\"Base class of PlasmaPy custom warnings.\n\n All PlasmaPy custom warnings should inherit from this class and be defined\n in this module.\n\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n\n\nclass PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n\n\nclass RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n", "path": "plasmapy/utils/exceptions.py"}]}
1,104
493
gh_patches_debug_916
rasdani/github-patches
git_diff
facebookresearch__ParlAI-581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can we keep a mturk task from outside parlai/mturk/tasks? Hi @JackUrb, I have a few questions regarding the mturk evaluation: 1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that > to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation. 2. How easy/hard it is to provide support for keeping the mturk task anywhere? </issue> <code> [start of setup.py] 1 # Copyright (c) 2017-present, Facebook, Inc. 2 # All rights reserved. 3 # This source code is licensed under the BSD-style license found in the 4 # LICENSE file in the root directory of this source tree. An additional grant 5 # of patent rights can be found in the PATENTS file in the same directory. 6 7 8 from setuptools import setup, find_packages 9 import sys 10 11 if sys.version_info < (3,): 12 sys.exit('Sorry, Python3 is required for ParlAI.') 13 14 with open('README.md', encoding="utf8") as f: 15 readme = f.read() 16 17 with open('LICENSE') as f: 18 license = f.read() 19 20 with open('requirements.txt') as f: 21 reqs = f.read() 22 23 setup( 24 name='parlai', 25 version='0.1.0', 26 description='Unified API for accessing dialog datasets.', 27 long_description=readme, 28 url='http://parl.ai/', 29 license=license, 30 packages=find_packages(exclude=( 31 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')), 32 install_requires=reqs.strip().split('\n'), 33 ) 34 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,4 +30,5 @@ packages=find_packages(exclude=( 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')), install_requires=reqs.strip().split('\n'), + include_package_data=True, )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,4 +30,5 @@\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n+ include_package_data=True,\n )\n", "issue": "Can we keep a mturk task from outside parlai/mturk/tasks?\nHi @JackUrb, I have a few questions regarding the mturk evaluation:\r\n\r\n1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that \r\n\r\n> to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. \r\n\r\n Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation.\r\n\r\n2. How easy/hard it is to provide support for keeping the mturk task anywhere?\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\n\nfrom setuptools import setup, find_packages\nimport sys\n\nif sys.version_info < (3,):\n sys.exit('Sorry, Python3 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\nsetup(\n name='parlai',\n version='0.1.0',\n description='Unified API for accessing dialog datasets.',\n long_description=readme,\n url='http://parl.ai/',\n license=license,\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n)\n", "path": "setup.py"}]}
1,011
78
gh_patches_debug_30885
rasdani/github-patches
git_diff
plotly__dash-2207
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Render arbitrary keys of components inside dictionaries. It should be possible to render arbitrary keys as component inside an object prop. Example typescript props: ```ts type Props = { dynamic: {[k: string]: JSX.Element} } ``` prop-types: `PropTypes.objectOf(PropTypes.node)` </issue> <code> [start of dash/development/_collect_nodes.py] 1 def is_node(value): 2 return value in ("node", "element") 3 4 5 def is_shape(value): 6 return value in ("shape", "exact") 7 8 9 def collect_array(a_value, base, nodes): 10 a_type = a_value["name"] 11 if is_node(a_type): 12 nodes.append(base) 13 elif a_type in ("shape", "exact"): 14 nodes = collect_nodes(a_value["value"], base + "[]", nodes) 15 elif a_type == "union": 16 nodes = collect_union(a_value["value"], base + "[]", nodes) 17 return nodes 18 19 20 def collect_union(type_list, base, nodes): 21 for t in type_list: 22 if is_node(t["name"]): 23 nodes.append(base) 24 elif is_shape(t["name"]): 25 nodes = collect_nodes(t["value"], base, nodes) 26 elif t["name"] == "arrayOf": 27 nodes = collect_array(t["value"], base, nodes) 28 return nodes 29 30 31 def collect_nodes(metadata, base="", nodes=None): 32 nodes = nodes or [] 33 34 for prop_name, value in metadata.items(): 35 # Support for recursive shapes, the type is directly in the field. 36 t_value = value.get("type", value) 37 p_type = t_value.get("name") 38 39 if base: 40 key = f"{base}.{prop_name}" 41 else: 42 key = prop_name 43 if is_node(p_type): 44 nodes.append(key) 45 elif p_type == "arrayOf": 46 a_value = t_value.get("value", t_value) 47 nodes = collect_array(a_value, key, nodes) 48 elif is_shape(p_type): 49 nodes = collect_nodes(t_value["value"], key, nodes) 50 elif p_type == "union": 51 nodes = collect_union(t_value["value"], key, nodes) 52 53 return nodes 54 55 56 def filter_base_nodes(nodes): 57 return [n for n in nodes if not any(e in n for e in ("[]", "."))] 58 [end of dash/development/_collect_nodes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py --- a/dash/development/_collect_nodes.py +++ b/dash/development/_collect_nodes.py @@ -14,6 +14,8 @@ nodes = collect_nodes(a_value["value"], base + "[]", nodes) elif a_type == "union": nodes = collect_union(a_value["value"], base + "[]", nodes) + elif a_type == "objectOf": + nodes = collect_object(a_value["value"], base + "[]", nodes) return nodes @@ -25,6 +27,22 @@ nodes = collect_nodes(t["value"], base, nodes) elif t["name"] == "arrayOf": nodes = collect_array(t["value"], base, nodes) + elif t["name"] == "objectOf": + nodes = collect_object(t["value"], base, nodes) + return nodes + + +def collect_object(o_value, base, nodes): + o_name = o_value.get("name") + o_key = base + "{}" + if is_node(o_name): + nodes.append(o_key) + elif is_shape(o_name): + nodes = collect_nodes(o_value.get("value", {}), o_key, nodes) + elif o_name == "union": + nodes = collect_union(o_value.get("value"), o_key, nodes) + elif o_name == "arrayOf": + nodes = collect_array(o_value, o_key, nodes) return nodes @@ -49,9 +67,12 @@ nodes = collect_nodes(t_value["value"], key, nodes) elif p_type == "union": nodes = collect_union(t_value["value"], key, nodes) + elif p_type == "objectOf": + o_value = t_value.get("value", {}) + nodes = collect_object(o_value, key, nodes) return nodes def filter_base_nodes(nodes): - return [n for n in nodes if not any(e in n for e in ("[]", "."))] + return [n for n in nodes if not any(e in n for e in ("[]", ".", "{}"))]
{"golden_diff": "diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py\n--- a/dash/development/_collect_nodes.py\n+++ b/dash/development/_collect_nodes.py\n@@ -14,6 +14,8 @@\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n+ elif a_type == \"objectOf\":\n+ nodes = collect_object(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n \n \n@@ -25,6 +27,22 @@\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n+ elif t[\"name\"] == \"objectOf\":\n+ nodes = collect_object(t[\"value\"], base, nodes)\n+ return nodes\n+\n+\n+def collect_object(o_value, base, nodes):\n+ o_name = o_value.get(\"name\")\n+ o_key = base + \"{}\"\n+ if is_node(o_name):\n+ nodes.append(o_key)\n+ elif is_shape(o_name):\n+ nodes = collect_nodes(o_value.get(\"value\", {}), o_key, nodes)\n+ elif o_name == \"union\":\n+ nodes = collect_union(o_value.get(\"value\"), o_key, nodes)\n+ elif o_name == \"arrayOf\":\n+ nodes = collect_array(o_value, o_key, nodes)\n return nodes\n \n \n@@ -49,9 +67,12 @@\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n+ elif p_type == \"objectOf\":\n+ o_value = t_value.get(\"value\", {})\n+ nodes = collect_object(o_value, key, nodes)\n \n return nodes\n \n \n def filter_base_nodes(nodes):\n- return [n for n in nodes if not any(e in n for e in (\"[]\", \".\"))]\n+ return [n for n in nodes if not any(e in n for e in (\"[]\", \".\", \"{}\"))]\n", "issue": "Render arbitrary keys of components inside dictionaries.\nIt should be possible to render arbitrary keys as component inside an object prop.\r\n\r\nExample typescript props:\r\n```ts\r\ntype Props = {\r\n dynamic: {[k: string]: JSX.Element}\r\n}\r\n```\r\nprop-types: `PropTypes.objectOf(PropTypes.node)` \n", "before_files": [{"content": "def is_node(value):\n return value in (\"node\", \"element\")\n\n\ndef is_shape(value):\n return value in (\"shape\", \"exact\")\n\n\ndef collect_array(a_value, base, nodes):\n a_type = a_value[\"name\"]\n if is_node(a_type):\n nodes.append(base)\n elif a_type in (\"shape\", \"exact\"):\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n\n\ndef collect_union(type_list, base, nodes):\n for t in type_list:\n if is_node(t[\"name\"]):\n nodes.append(base)\n elif is_shape(t[\"name\"]):\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n return nodes\n\n\ndef collect_nodes(metadata, base=\"\", nodes=None):\n nodes = nodes or []\n\n for prop_name, value in metadata.items():\n # Support for recursive shapes, the type is directly in the field.\n t_value = value.get(\"type\", value)\n p_type = t_value.get(\"name\")\n\n if base:\n key = f\"{base}.{prop_name}\"\n else:\n key = prop_name\n if is_node(p_type):\n nodes.append(key)\n elif p_type == \"arrayOf\":\n a_value = t_value.get(\"value\", t_value)\n nodes = collect_array(a_value, key, nodes)\n elif is_shape(p_type):\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n\n return nodes\n\n\ndef filter_base_nodes(nodes):\n return [n for n in nodes if not any(e in n 
for e in (\"[]\", \".\"))]\n", "path": "dash/development/_collect_nodes.py"}]}
1,126
487
gh_patches_debug_16417
rasdani/github-patches
git_diff
celery__kombu-101
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kombu compression "encodes" the body ? At https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see: ``` return encoder(body.encode("utf-8")), content_type ``` Same for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ? </issue> <code> [start of kombu/compression.py] 1 """ 2 kombu.compression 3 ================= 4 5 Compression utilities. 6 7 :copyright: (c) 2009 - 2012 by Ask Solem. 8 :license: BSD, see LICENSE for more details. 9 10 """ 11 from __future__ import absolute_import 12 13 import zlib 14 15 _aliases = {} 16 _encoders = {} 17 _decoders = {} 18 19 __all__ = ["register", "encoders", "get_encoder", 20 "get_decoder", "compress", "decompress"] 21 22 23 def register(encoder, decoder, content_type, aliases=[]): 24 """Register new compression method. 25 26 :param encoder: Function used to compress text. 27 :param decoder: Function used to decompress previously compressed text. 28 :param content_type: The mime type this compression method identifies as. 29 :param aliases: A list of names to associate with this compression method. 30 31 """ 32 _encoders[content_type] = encoder 33 _decoders[content_type] = decoder 34 _aliases.update((alias, content_type) for alias in aliases) 35 36 37 def encoders(): 38 """Returns a list of available compression methods.""" 39 return _encoders.keys() 40 41 42 def get_encoder(t): 43 """Get encoder by alias name.""" 44 t = _aliases.get(t, t) 45 return _encoders[t], t 46 47 48 def get_decoder(t): 49 """Get decoder by alias name.""" 50 return _decoders[_aliases.get(t, t)] 51 52 53 def compress(body, content_type): 54 """Compress text. 55 56 :param body: The text to compress. 57 :param content_type: mime-type of compression method to use. 58 59 """ 60 encoder, content_type = get_encoder(content_type) 61 return encoder(body.encode("utf-8")), content_type 62 63 64 def decompress(body, content_type): 65 """Decompress compressed text. 66 67 :param body: Previously compressed text to uncompress. 68 :param content_type: mime-type of compression method used. 69 70 """ 71 return get_decoder(content_type)(body).decode("utf-8") 72 73 74 register(zlib.compress, 75 zlib.decompress, 76 "application/x-gzip", aliases=["gzip", "zlib"]) 77 try: 78 import bz2 79 except ImportError: 80 pass # Jython? 81 else: 82 register(bz2.compress, 83 bz2.decompress, 84 "application/x-bz2", aliases=["bzip2", "bzip"]) 85 [end of kombu/compression.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kombu/compression.py b/kombu/compression.py --- a/kombu/compression.py +++ b/kombu/compression.py @@ -10,6 +10,8 @@ """ from __future__ import absolute_import +from kombu.utils.encoding import ensure_bytes, bytes_to_str + import zlib _aliases = {} @@ -58,7 +60,7 @@ """ encoder, content_type = get_encoder(content_type) - return encoder(body.encode("utf-8")), content_type + return encoder(ensure_bytes(body)), content_type def decompress(body, content_type): @@ -68,7 +70,7 @@ :param content_type: mime-type of compression method used. """ - return get_decoder(content_type)(body).decode("utf-8") + return bytes_to_str(get_decoder(content_type)(body)) register(zlib.compress,
{"golden_diff": "diff --git a/kombu/compression.py b/kombu/compression.py\n--- a/kombu/compression.py\n+++ b/kombu/compression.py\n@@ -10,6 +10,8 @@\n \"\"\"\n from __future__ import absolute_import\n \n+from kombu.utils.encoding import ensure_bytes, bytes_to_str\n+\n import zlib\n \n _aliases = {}\n@@ -58,7 +60,7 @@\n \n \"\"\"\n encoder, content_type = get_encoder(content_type)\n- return encoder(body.encode(\"utf-8\")), content_type\n+ return encoder(ensure_bytes(body)), content_type\n \n \n def decompress(body, content_type):\n@@ -68,7 +70,7 @@\n :param content_type: mime-type of compression method used.\n \n \"\"\"\n- return get_decoder(content_type)(body).decode(\"utf-8\")\n+ return bytes_to_str(get_decoder(content_type)(body))\n \n \n register(zlib.compress,\n", "issue": "Kombu compression \"encodes\" the body ?\nAt https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see:\n\n```\nreturn encoder(body.encode(\"utf-8\")), content_type\n```\n\nSame for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ?\n\n", "before_files": [{"content": "\"\"\"\nkombu.compression\n=================\n\nCompression utilities.\n\n:copyright: (c) 2009 - 2012 by Ask Solem.\n:license: BSD, see LICENSE for more details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport zlib\n\n_aliases = {}\n_encoders = {}\n_decoders = {}\n\n__all__ = [\"register\", \"encoders\", \"get_encoder\",\n \"get_decoder\", \"compress\", \"decompress\"]\n\n\ndef register(encoder, decoder, content_type, aliases=[]):\n \"\"\"Register new compression method.\n\n :param encoder: Function used to compress text.\n :param decoder: Function used to decompress previously compressed text.\n :param content_type: The mime type this compression method identifies as.\n :param aliases: A list of names to associate with this compression method.\n\n \"\"\"\n _encoders[content_type] = encoder\n _decoders[content_type] = decoder\n _aliases.update((alias, content_type) for alias in aliases)\n\n\ndef encoders():\n \"\"\"Returns a list of available compression methods.\"\"\"\n return _encoders.keys()\n\n\ndef get_encoder(t):\n \"\"\"Get encoder by alias name.\"\"\"\n t = _aliases.get(t, t)\n return _encoders[t], t\n\n\ndef get_decoder(t):\n \"\"\"Get decoder by alias name.\"\"\"\n return _decoders[_aliases.get(t, t)]\n\n\ndef compress(body, content_type):\n \"\"\"Compress text.\n\n :param body: The text to compress.\n :param content_type: mime-type of compression method to use.\n\n \"\"\"\n encoder, content_type = get_encoder(content_type)\n return encoder(body.encode(\"utf-8\")), content_type\n\n\ndef decompress(body, content_type):\n \"\"\"Decompress compressed text.\n\n :param body: Previously compressed text to uncompress.\n :param content_type: mime-type of compression method used.\n\n \"\"\"\n return get_decoder(content_type)(body).decode(\"utf-8\")\n\n\nregister(zlib.compress,\n zlib.decompress,\n \"application/x-gzip\", aliases=[\"gzip\", \"zlib\"])\ntry:\n import bz2\nexcept ImportError:\n pass # Jython?\nelse:\n register(bz2.compress,\n bz2.decompress,\n \"application/x-bz2\", aliases=[\"bzip2\", \"bzip\"])\n", "path": "kombu/compression.py"}]}
1,288
204
gh_patches_debug_1572
rasdani/github-patches
git_diff
hylang__hy-2070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The manual is missing module names It looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named "Walk" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope. Pointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526. </issue> <code> [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 # 3 # This file is execfile()d with the current directory set to its containing dir. 4 5 import re, os, sys, time, html 6 7 sys.path.insert(0, os.path.abspath('..')) 8 9 extensions = [ 10 'sphinx.ext.napoleon', 11 'sphinx.ext.intersphinx', 12 'sphinx.ext.autodoc', 13 'sphinxcontrib.hydomain', 14 ] 15 16 from get_version import __version__ as hy_version 17 18 # Read the Docs might dirty its checkout, so strip the dirty flag. 19 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version) 20 21 templates_path = ['_templates'] 22 source_suffix = '.rst' 23 24 master_doc = 'index' 25 26 # General information about the project. 27 project = 'hy' 28 copyright = '%s the authors' % time.strftime('%Y') 29 30 # The version info for the project you're documenting, acts as replacement for 31 # |version| and |release|, also used in various other places throughout the 32 # built documents. 33 # 34 # The short X.Y version. 35 version = ".".join(hy_version.split(".")[:-1]) 36 # The full version, including alpha/beta/rc tags. 37 release = hy_version 38 hy_descriptive_version = html.escape(hy_version) 39 if "+" in hy_version: 40 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" 41 42 exclude_patterns = ['_build', 'coreteam.rst'] 43 add_module_names = False 44 45 pygments_style = 'sphinx' 46 47 import sphinx_rtd_theme 48 html_theme = 'sphinx_rtd_theme' 49 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 50 51 # Add any paths that contain custom static files (such as style sheets) here, 52 # relative to this directory. They are copied after the builtin static files, 53 # so a file named "default.css" will overwrite the builtin "default.css". 54 html_static_path = ['_static'] 55 56 html_use_smartypants = False 57 html_show_sphinx = False 58 59 html_context = dict( 60 hy_descriptive_version = hy_descriptive_version) 61 62 highlight_language = 'clojure' 63 64 intersphinx_mapping = dict( 65 py = ('https://docs.python.org/3/', None)) 66 # ** Generate Cheatsheet 67 import json 68 from pathlib import Path 69 from itertools import zip_longest 70 71 def refize(spec): 72 role = ':hy:func:' 73 if isinstance(spec, dict): 74 _name = spec['name'] 75 uri = spec['uri'] 76 if spec.get('internal'): 77 role = ':ref:' 78 else: 79 uri = spec 80 _name = str.split(uri, '.')[-1] 81 return '{}`{} <{}>`'.format(role, _name, uri) 82 83 84 def format_refs(refs, indent): 85 args = [iter(map(refize, refs))] 86 ref_groups = zip_longest(*args, fillvalue="") 87 return str.join( 88 ' \\\n' + ' ' * (indent + 3), 89 [str.join(' ', ref_group) for ref_group in ref_groups], 90 ) 91 92 93 def format_row(category, divider_loc): 94 return '{title: <{width}} | {methods}'.format( 95 width=divider_loc, 96 title=category['name'], 97 methods=format_refs(category['methods'], divider_loc) 98 ) 99 100 101 def format_table(table_spec): 102 table_name = table_spec['name'] 103 categories = table_spec['categories'] 104 longest_cat_name = max([len(category['name']) for category in categories]) 105 table = [ 106 table_name, 107 '-' * len(table_name), 108 '', 109 '=' * longest_cat_name + ' ' + '=' * 25, 110 *(format_row(category, longest_cat_name) for category in categories), 111 '=' * longest_cat_name + ' ' + '=' * 25, 112 '' 113 ] 114 return '\n'.join(table) 115 116 117 # Modifications to the cheatsheet should be added in `cheatsheet.json` 118 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text()) 119 cheatsheet = [ 120 '..', 121 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``', 122 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``', 123 '', 124 '.. _cheatsheet:', 125 '', 126 'Cheatsheet', 127 '==========', 128 '', 129 *map(format_table, cheatsheet_spec), 130 ] 131 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet)) 132 133 134 # ** Sphinx App Setup 135 136 137 def setup(app): 138 app.add_css_file('overrides.css') 139 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -40,7 +40,7 @@ hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" exclude_patterns = ['_build', 'coreteam.rst'] -add_module_names = False +add_module_names = True pygments_style = 'sphinx'
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -40,7 +40,7 @@\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n \n exclude_patterns = ['_build', 'coreteam.rst']\n-add_module_names = False\n+add_module_names = True\n \n pygments_style = 'sphinx'\n", "issue": "The manual is missing module names\nIt looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named \"Walk\" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope.\r\n\r\nPointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = False\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max([len(category['name']) for category in categories])\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]}
1,944
93
gh_patches_debug_29513
rasdani/github-patches
git_diff
TheAlgorithms__Python-10140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Concatenate/consolidate all algorithms with different implementations ### Feature description There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file </issue> <code> [start of strings/reverse_letters.py] 1 def reverse_letters(input_str: str) -> str: 2 """ 3 Reverses letters in a given string without adjusting the position of the words 4 >>> reverse_letters('The cat in the hat') 5 'ehT tac ni eht tah' 6 >>> reverse_letters('The quick brown fox jumped over the lazy dog.') 7 'ehT kciuq nworb xof depmuj revo eht yzal .god' 8 >>> reverse_letters('Is this true?') 9 'sI siht ?eurt' 10 >>> reverse_letters("I love Python") 11 'I evol nohtyP' 12 """ 13 return " ".join([word[::-1] for word in input_str.split()]) 14 15 16 if __name__ == "__main__": 17 import doctest 18 19 doctest.testmod() 20 [end of strings/reverse_letters.py] [start of strings/reverse_long_words.py] 1 def reverse_long_words(sentence: str) -> str: 2 """ 3 Reverse all words that are longer than 4 characters in a sentence. 4 5 >>> reverse_long_words("Hey wollef sroirraw") 6 'Hey fellow warriors' 7 >>> reverse_long_words("nohtyP is nohtyP") 8 'Python is Python' 9 >>> reverse_long_words("1 12 123 1234 54321 654321") 10 '1 12 123 1234 12345 123456' 11 """ 12 return " ".join( 13 "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() 14 ) 15 16 17 if __name__ == "__main__": 18 import doctest 19 20 doctest.testmod() 21 print(reverse_long_words("Hey wollef sroirraw")) 22 [end of strings/reverse_long_words.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py --- a/strings/reverse_letters.py +++ b/strings/reverse_letters.py @@ -1,19 +1,24 @@ -def reverse_letters(input_str: str) -> str: +def reverse_letters(sentence: str, length: int = 0) -> str: """ - Reverses letters in a given string without adjusting the position of the words - >>> reverse_letters('The cat in the hat') - 'ehT tac ni eht tah' - >>> reverse_letters('The quick brown fox jumped over the lazy dog.') - 'ehT kciuq nworb xof depmuj revo eht yzal .god' - >>> reverse_letters('Is this true?') - 'sI siht ?eurt' - >>> reverse_letters("I love Python") - 'I evol nohtyP' + Reverse all words that are longer than the given length of characters in a sentence. + If unspecified, length is taken as 0 + + >>> reverse_letters("Hey wollef sroirraw", 3) + 'Hey fellow warriors' + >>> reverse_letters("nohtyP is nohtyP", 2) + 'Python is Python' + >>> reverse_letters("1 12 123 1234 54321 654321", 0) + '1 21 321 4321 12345 123456' + >>> reverse_letters("racecar") + 'racecar' """ - return " ".join([word[::-1] for word in input_str.split()]) + return " ".join( + "".join(word[::-1]) if len(word) > length else word for word in sentence.split() + ) if __name__ == "__main__": import doctest doctest.testmod() + print(reverse_letters("Hey wollef sroirraw")) diff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py deleted file mode 100644 --- a/strings/reverse_long_words.py +++ /dev/null @@ -1,21 +0,0 @@ -def reverse_long_words(sentence: str) -> str: - """ - Reverse all words that are longer than 4 characters in a sentence. - - >>> reverse_long_words("Hey wollef sroirraw") - 'Hey fellow warriors' - >>> reverse_long_words("nohtyP is nohtyP") - 'Python is Python' - >>> reverse_long_words("1 12 123 1234 54321 654321") - '1 12 123 1234 12345 123456' - """ - return " ".join( - "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() - ) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - print(reverse_long_words("Hey wollef sroirraw"))
{"golden_diff": "diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py\n--- a/strings/reverse_letters.py\n+++ b/strings/reverse_letters.py\n@@ -1,19 +1,24 @@\n-def reverse_letters(input_str: str) -> str:\n+def reverse_letters(sentence: str, length: int = 0) -> str:\n \"\"\"\n- Reverses letters in a given string without adjusting the position of the words\n- >>> reverse_letters('The cat in the hat')\n- 'ehT tac ni eht tah'\n- >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n- 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n- >>> reverse_letters('Is this true?')\n- 'sI siht ?eurt'\n- >>> reverse_letters(\"I love Python\")\n- 'I evol nohtyP'\n+ Reverse all words that are longer than the given length of characters in a sentence.\n+ If unspecified, length is taken as 0\n+\n+ >>> reverse_letters(\"Hey wollef sroirraw\", 3)\n+ 'Hey fellow warriors'\n+ >>> reverse_letters(\"nohtyP is nohtyP\", 2)\n+ 'Python is Python'\n+ >>> reverse_letters(\"1 12 123 1234 54321 654321\", 0)\n+ '1 21 321 4321 12345 123456'\n+ >>> reverse_letters(\"racecar\")\n+ 'racecar'\n \"\"\"\n- return \" \".join([word[::-1] for word in input_str.split()])\n+ return \" \".join(\n+ \"\".join(word[::-1]) if len(word) > length else word for word in sentence.split()\n+ )\n \n \n if __name__ == \"__main__\":\n import doctest\n \n doctest.testmod()\n+ print(reverse_letters(\"Hey wollef sroirraw\"))\ndiff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py\ndeleted file mode 100644\n--- a/strings/reverse_long_words.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-def reverse_long_words(sentence: str) -> str:\n- \"\"\"\n- Reverse all words that are longer than 4 characters in a sentence.\n-\n- >>> reverse_long_words(\"Hey wollef sroirraw\")\n- 'Hey fellow warriors'\n- >>> reverse_long_words(\"nohtyP is nohtyP\")\n- 'Python is Python'\n- >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n- '1 12 123 1234 12345 123456'\n- \"\"\"\n- return \" \".join(\n- \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n- )\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "def reverse_letters(input_str: str) -> str:\n \"\"\"\n Reverses letters in a given string without adjusting the position of the words\n >>> reverse_letters('The cat in the hat')\n 'ehT tac ni eht tah'\n >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n >>> reverse_letters('Is this true?')\n 'sI siht ?eurt'\n >>> reverse_letters(\"I love Python\")\n 'I evol nohtyP'\n \"\"\"\n return \" \".join([word[::-1] for word in input_str.split()])\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "strings/reverse_letters.py"}, {"content": "def reverse_long_words(sentence: str) -> str:\n \"\"\"\n Reverse all words that are longer than 4 characters in a sentence.\n\n >>> reverse_long_words(\"Hey wollef sroirraw\")\n 'Hey fellow warriors'\n >>> reverse_long_words(\"nohtyP is nohtyP\")\n 'Python is Python'\n >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n '1 12 123 1234 12345 123456'\n \"\"\"\n return \" \".join(\n \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n )\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "path": "strings/reverse_long_words.py"}]}
1,040
733
gh_patches_debug_28763
rasdani/github-patches
git_diff
DataDog__dd-trace-py-2196
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DD_TAGS separator inconsistent with heroku-buildpack-datadog https://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces. However, dd-trace-py splits the tags by commas: https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116 ``` def parse_tags_str(tags_str): """Parse a string of tags typically provided via environment variables. The expected string is of the form:: "key1:value1,key2:value2" :param tags_str: A string of the above form to parse tags from. :return: A dict containing the tags that were parsed. """ parsed_tags = {} if not tags_str: return parsed_tags for tag in tags_str.split(","): try: key, value = tag.split(":", 1) # Validate the tag if key == "" or value == "" or value.endswith(":"): raise ValueError except ValueError: log.error( "Malformed tag in tag pair '%s' from tag string '%s'.", tag, tags_str, ) else: parsed_tags[key] = value return parsed_tags ``` This results in all of the tags being set as the value of the first tag. This looks to have been previously surfaced for the java tracer here: https://github.com/DataDog/heroku-buildpack-datadog/issues/187 And was initially changed in the buildpack but was reverted and dd-trace-java updated to accommodate commas or spaces: https://github.com/DataDog/dd-trace-java/pull/2011 ### Which version of dd-trace-py are you using? 0.47.0 ### How can we reproduce your problem? Perform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog. ### What is the result that you get? `dyno` tag contains all other tags. ### What is the result that you expected? Tags to be properly separated. </issue> <code> [start of ddtrace/utils/formats.py] 1 import logging 2 import os 3 4 from .deprecation import deprecation 5 6 7 log = logging.getLogger(__name__) 8 9 10 def get_env(*parts, **kwargs): 11 """Retrieves environment variables value for the given integration. It must be used 12 for consistency between integrations. The implementation is backward compatible 13 with legacy nomenclature: 14 15 * `DATADOG_` is a legacy prefix with lower priority 16 * `DD_` environment variables have the highest priority 17 * the environment variable is built concatenating `integration` and `variable` 18 arguments 19 * return `default` otherwise 20 21 :param parts: evironment variable parts that will be joined with ``_`` to generate the name 22 :type parts: :obj:`str` 23 :param kwargs: ``default`` is the only supported keyword argument which sets the default value 24 if no environment variable is found 25 :rtype: :obj:`str` | ``kwargs["default"]`` 26 :returns: The string environment variable value or the value of ``kwargs["default"]`` if not found 27 """ 28 default = kwargs.get("default") 29 30 key = "_".join(parts) 31 key = key.upper() 32 legacy_env = "DATADOG_{}".format(key) 33 env = "DD_{}".format(key) 34 35 value = os.getenv(env) 36 legacy = os.getenv(legacy_env) 37 if legacy: 38 # Deprecation: `DATADOG_` variables are deprecated 39 deprecation( 40 name="DATADOG_", 41 message="Use `DD_` prefix instead", 42 version="1.0.0", 43 ) 44 45 value = value or legacy 46 return value if value else default 47 48 49 def deep_getattr(obj, attr_string, default=None): 50 """ 51 Returns the attribute of `obj` at the dotted path given by `attr_string` 52 If no such attribute is reachable, returns `default` 53 54 >>> deep_getattr(cass, 'cluster') 55 <cassandra.cluster.Cluster object at 0xa20c350 56 57 >>> deep_getattr(cass, 'cluster.metadata.partitioner') 58 u'org.apache.cassandra.dht.Murmur3Partitioner' 59 60 >>> deep_getattr(cass, 'i.dont.exist', default='default') 61 'default' 62 """ 63 attrs = attr_string.split(".") 64 for attr in attrs: 65 try: 66 obj = getattr(obj, attr) 67 except AttributeError: 68 return default 69 70 return obj 71 72 73 def asbool(value): 74 """Convert the given String to a boolean object. 75 76 Accepted values are `True` and `1`. 77 """ 78 if value is None: 79 return False 80 81 if isinstance(value, bool): 82 return value 83 84 return value.lower() in ("true", "1") 85 86 87 def parse_tags_str(tags_str): 88 """Parse a string of tags typically provided via environment variables. 89 90 The expected string is of the form:: 91 "key1:value1,key2:value2" 92 93 :param tags_str: A string of the above form to parse tags from. 94 :return: A dict containing the tags that were parsed. 95 """ 96 parsed_tags = {} 97 if not tags_str: 98 return parsed_tags 99 100 for tag in tags_str.split(","): 101 try: 102 key, value = tag.split(":", 1) 103 104 # Validate the tag 105 if key == "" or value == "" or value.endswith(":"): 106 raise ValueError 107 except ValueError: 108 log.error( 109 "Malformed tag in tag pair '%s' from tag string '%s'.", 110 tag, 111 tags_str, 112 ) 113 else: 114 parsed_tags[key] = value 115 116 return parsed_tags 117 [end of ddtrace/utils/formats.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py --- a/ddtrace/utils/formats.py +++ b/ddtrace/utils/formats.py @@ -1,9 +1,13 @@ import logging import os +import re from .deprecation import deprecation +# Tags `key:value` must be separated by either comma or space +_TAGS_NOT_SEPARATED = re.compile(r":[^,\s]+:") + log = logging.getLogger(__name__) @@ -89,6 +93,7 @@ The expected string is of the form:: "key1:value1,key2:value2" + "key1:value1 key2:value2" :param tags_str: A string of the above form to parse tags from. :return: A dict containing the tags that were parsed. @@ -97,7 +102,29 @@ if not tags_str: return parsed_tags - for tag in tags_str.split(","): + if _TAGS_NOT_SEPARATED.search(tags_str): + log.error("Malformed tag string with tags not separated by comma or space '%s'.", tags_str) + return parsed_tags + + # Identify separator based on which successfully identifies the correct + # number of valid tags + numtagseps = tags_str.count(":") + for sep in [",", " "]: + if sum(":" in _ for _ in tags_str.split(sep)) == numtagseps: + break + else: + log.error( + ( + "Failed to find separator for tag string: '%s'.\n" + "Tag strings must be comma or space separated:\n" + " key1:value1,key2:value2\n" + " key1:value1 key2:value2" + ), + tags_str, + ) + return parsed_tags + + for tag in tags_str.split(sep): try: key, value = tag.split(":", 1)
{"golden_diff": "diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py\n--- a/ddtrace/utils/formats.py\n+++ b/ddtrace/utils/formats.py\n@@ -1,9 +1,13 @@\n import logging\n import os\n+import re\n \n from .deprecation import deprecation\n \n \n+# Tags `key:value` must be separated by either comma or space\n+_TAGS_NOT_SEPARATED = re.compile(r\":[^,\\s]+:\")\n+\n log = logging.getLogger(__name__)\n \n \n@@ -89,6 +93,7 @@\n \n The expected string is of the form::\n \"key1:value1,key2:value2\"\n+ \"key1:value1 key2:value2\"\n \n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n@@ -97,7 +102,29 @@\n if not tags_str:\n return parsed_tags\n \n- for tag in tags_str.split(\",\"):\n+ if _TAGS_NOT_SEPARATED.search(tags_str):\n+ log.error(\"Malformed tag string with tags not separated by comma or space '%s'.\", tags_str)\n+ return parsed_tags\n+\n+ # Identify separator based on which successfully identifies the correct\n+ # number of valid tags\n+ numtagseps = tags_str.count(\":\")\n+ for sep in [\",\", \" \"]:\n+ if sum(\":\" in _ for _ in tags_str.split(sep)) == numtagseps:\n+ break\n+ else:\n+ log.error(\n+ (\n+ \"Failed to find separator for tag string: '%s'.\\n\"\n+ \"Tag strings must be comma or space separated:\\n\"\n+ \" key1:value1,key2:value2\\n\"\n+ \" key1:value1 key2:value2\"\n+ ),\n+ tags_str,\n+ )\n+ return parsed_tags\n+\n+ for tag in tags_str.split(sep):\n try:\n key, value = tag.split(\":\", 1)\n", "issue": "DD_TAGS separator inconsistent with heroku-buildpack-datadog\nhttps://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces.\r\n\r\nHowever, dd-trace-py splits the tags by commas:\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116\r\n```\r\ndef parse_tags_str(tags_str):\r\n \"\"\"Parse a string of tags typically provided via environment variables.\r\n The expected string is of the form::\r\n \"key1:value1,key2:value2\"\r\n :param tags_str: A string of the above form to parse tags from.\r\n :return: A dict containing the tags that were parsed.\r\n \"\"\"\r\n parsed_tags = {}\r\n if not tags_str:\r\n return parsed_tags\r\n\r\n for tag in tags_str.split(\",\"):\r\n try:\r\n key, value = tag.split(\":\", 1)\r\n\r\n # Validate the tag\r\n if key == \"\" or value == \"\" or value.endswith(\":\"):\r\n raise ValueError\r\n except ValueError:\r\n log.error(\r\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\r\n tag,\r\n tags_str,\r\n )\r\n else:\r\n parsed_tags[key] = value\r\n\r\n return parsed_tags\r\n```\r\n\r\nThis results in all of the tags being set as the value of the first tag.\r\n\r\nThis looks to have been previously surfaced for the java tracer here:\r\nhttps://github.com/DataDog/heroku-buildpack-datadog/issues/187\r\n\r\nAnd was initially changed in the buildpack but was reverted and dd-trace-java updated to accommodate commas or spaces:\r\nhttps://github.com/DataDog/dd-trace-java/pull/2011\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.47.0\r\n\r\n### How can we reproduce your problem?\r\nPerform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog.\r\n\r\n### What is the result that you get?\r\n`dyno` tag contains all other tags.\r\n\r\n### What is the result that you expected?\r\nTags to be properly separated.\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nfrom .deprecation import deprecation\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_env(*parts, 
**kwargs):\n \"\"\"Retrieves environment variables value for the given integration. It must be used\n for consistency between integrations. The implementation is backward compatible\n with legacy nomenclature:\n\n * `DATADOG_` is a legacy prefix with lower priority\n * `DD_` environment variables have the highest priority\n * the environment variable is built concatenating `integration` and `variable`\n arguments\n * return `default` otherwise\n\n :param parts: evironment variable parts that will be joined with ``_`` to generate the name\n :type parts: :obj:`str`\n :param kwargs: ``default`` is the only supported keyword argument which sets the default value\n if no environment variable is found\n :rtype: :obj:`str` | ``kwargs[\"default\"]``\n :returns: The string environment variable value or the value of ``kwargs[\"default\"]`` if not found\n \"\"\"\n default = kwargs.get(\"default\")\n\n key = \"_\".join(parts)\n key = key.upper()\n legacy_env = \"DATADOG_{}\".format(key)\n env = \"DD_{}\".format(key)\n\n value = os.getenv(env)\n legacy = os.getenv(legacy_env)\n if legacy:\n # Deprecation: `DATADOG_` variables are deprecated\n deprecation(\n name=\"DATADOG_\",\n message=\"Use `DD_` prefix instead\",\n version=\"1.0.0\",\n )\n\n value = value or legacy\n return value if value else default\n\n\ndef deep_getattr(obj, attr_string, default=None):\n \"\"\"\n Returns the attribute of `obj` at the dotted path given by `attr_string`\n If no such attribute is reachable, returns `default`\n\n >>> deep_getattr(cass, 'cluster')\n <cassandra.cluster.Cluster object at 0xa20c350\n\n >>> deep_getattr(cass, 'cluster.metadata.partitioner')\n u'org.apache.cassandra.dht.Murmur3Partitioner'\n\n >>> deep_getattr(cass, 'i.dont.exist', default='default')\n 'default'\n \"\"\"\n attrs = attr_string.split(\".\")\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except AttributeError:\n return default\n\n return obj\n\n\ndef asbool(value):\n \"\"\"Convert the given String to a boolean object.\n\n Accepted values are `True` and `1`.\n \"\"\"\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n return value.lower() in (\"true\", \"1\")\n\n\ndef parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n log.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n", "path": "ddtrace/utils/formats.py"}]}
num_tokens_prompt: 2,023
num_tokens_diff: 449

problem_id: gh_patches_debug_685
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__TensorRT-1849
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Test Suite for `torch.compile` backend Partitioning/Lowering Phases - Add robust test suite for `torch.compile` backend, ensuring each phase functions correctly - Add general-purpose utilities for test expansion as the backend evolves </issue> <code> [start of py/torch_tensorrt/dynamo/torch_compile/utils.py] 1 import torch 2 3 from typing import Any, Union, Sequence, Dict 4 from torch_tensorrt import _Input, Device 5 6 7 def prepare_inputs( 8 inputs: Union[_Input.Input, torch.Tensor, Sequence, Dict], 9 device: torch.device = torch.device("cuda"), 10 ) -> Any: 11 if isinstance(inputs, _Input.Input): 12 if isinstance(inputs.shape, dict): 13 return inputs.example_tensor(optimization_profile_field="opt_shape").to( 14 device 15 ) 16 else: 17 return inputs.example_tensor().to(device) 18 19 elif isinstance(inputs, torch.Tensor): 20 return inputs 21 22 elif isinstance(inputs, list): 23 prepared_input = list() 24 25 for input_obj in inputs: 26 prepared_input.append(prepare_inputs(input_obj)) 27 28 return prepared_input 29 30 elif isinstance(inputs, tuple): 31 prepared_input = list() 32 33 for input_obj in inputs: 34 prepared_input.append(prepare_inputs(input_obj)) 35 36 return tuple(prepared_input) 37 38 elif isinstance(inputs, dict): 39 prepared_input = dict() 40 41 for key, input_obj in inputs.items(): 42 prepared_input[key] = prepare_inputs(input_obj) 43 44 return prepared_input 45 46 else: 47 raise ValueError( 48 f"Invalid input type {type(inputs)} encountered in the torch_compile input parsing. " 49 + "Allowed input types: {torch_tensorrt.Input, torch.Tensor, list, tuple, dict}" 50 ) 51 52 53 def prepare_device(device: Union[Device, torch.device]) -> torch.device: 54 if isinstance(device, Device): 55 if device.gpu_id != -1: 56 device = torch.device(device.gpu_id) 57 else: 58 raise ValueError("Invalid GPU ID provided for the CUDA device provided") 59 60 elif isinstance(device, torch.device): 61 device = device 62 63 else: 64 raise ValueError( 65 "Invalid device provided. Supported options: torch.device | torch_tensorrt.Device" 66 ) 67 [end of py/torch_tensorrt/dynamo/torch_compile/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/py/torch_tensorrt/dynamo/torch_compile/utils.py b/py/torch_tensorrt/dynamo/torch_compile/utils.py --- a/py/torch_tensorrt/dynamo/torch_compile/utils.py +++ b/py/torch_tensorrt/dynamo/torch_compile/utils.py @@ -64,3 +64,5 @@ raise ValueError( "Invalid device provided. Supported options: torch.device | torch_tensorrt.Device" ) + + return device
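A short usage sketch of the fixed function, assuming the patch above; the module path follows the file location and the inputs are hypothetical:

```python
# Before the fix, prepare_device fell through without a return statement
# and implicitly returned None; callers now get the torch.device back.
import torch

from torch_tensorrt.dynamo.torch_compile.utils import prepare_device

device = prepare_device(torch.device("cuda:0"))
assert isinstance(device, torch.device)  # no longer None
```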
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/torch_compile/utils.py b/py/torch_tensorrt/dynamo/torch_compile/utils.py\n--- a/py/torch_tensorrt/dynamo/torch_compile/utils.py\n+++ b/py/torch_tensorrt/dynamo/torch_compile/utils.py\n@@ -64,3 +64,5 @@\n raise ValueError(\n \"Invalid device provided. Supported options: torch.device | torch_tensorrt.Device\"\n )\n+\n+ return device\n", "issue": "Add Test Suite for `torch.compile` backend Partitioning/Lowering Phases\n- Add robust test suite for `torch.compile` backend, ensuring each phase functions correctly\r\n- Add general-purpose utilities for test expansion as the backend evolves\n", "before_files": [{"content": "import torch\n\nfrom typing import Any, Union, Sequence, Dict\nfrom torch_tensorrt import _Input, Device\n\n\ndef prepare_inputs(\n inputs: Union[_Input.Input, torch.Tensor, Sequence, Dict],\n device: torch.device = torch.device(\"cuda\"),\n) -> Any:\n if isinstance(inputs, _Input.Input):\n if isinstance(inputs.shape, dict):\n return inputs.example_tensor(optimization_profile_field=\"opt_shape\").to(\n device\n )\n else:\n return inputs.example_tensor().to(device)\n\n elif isinstance(inputs, torch.Tensor):\n return inputs\n\n elif isinstance(inputs, list):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return prepared_input\n\n elif isinstance(inputs, tuple):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return tuple(prepared_input)\n\n elif isinstance(inputs, dict):\n prepared_input = dict()\n\n for key, input_obj in inputs.items():\n prepared_input[key] = prepare_inputs(input_obj)\n\n return prepared_input\n\n else:\n raise ValueError(\n f\"Invalid input type {type(inputs)} encountered in the torch_compile input parsing. \"\n + \"Allowed input types: {torch_tensorrt.Input, torch.Tensor, list, tuple, dict}\"\n )\n\n\ndef prepare_device(device: Union[Device, torch.device]) -> torch.device:\n if isinstance(device, Device):\n if device.gpu_id != -1:\n device = torch.device(device.gpu_id)\n else:\n raise ValueError(\"Invalid GPU ID provided for the CUDA device provided\")\n\n elif isinstance(device, torch.device):\n device = device\n\n else:\n raise ValueError(\n \"Invalid device provided. Supported options: torch.device | torch_tensorrt.Device\"\n )\n", "path": "py/torch_tensorrt/dynamo/torch_compile/utils.py"}]}
num_tokens_prompt: 1,118
num_tokens_diff: 102

problem_id: gh_patches_debug_21907
source: rasdani/github-patches
task_type: git_diff
in_source_id: webkom__lego-1985
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Timezone email Format dates in emails in the same language as the email template (Norwegian), and converted to the proper timezone. ![image](https://user-images.githubusercontent.com/1467188/34150647-476ffb12-e4a9-11e7-9f17-5b83e0690dad.png) ![screenshot 19 des 1 2017 10_39_27](https://user-images.githubusercontent.com/1467188/34150870-f9c33536-e4a9-11e7-836e-a3e93ae27f0f.png) </issue> <code> [start of lego/apps/events/notifications.py] 1 from lego.apps.notifications.constants import ( 2 EVENT_ADMIN_REGISTRATION, 3 EVENT_ADMIN_UNREGISTRATION, 4 EVENT_BUMP, 5 EVENT_PAYMENT_OVERDUE, 6 EVENT_PAYMENT_OVERDUE_CREATOR, 7 ) 8 from lego.apps.notifications.notification import Notification 9 10 11 class EventBumpNotification(Notification): 12 13 name = EVENT_BUMP 14 15 def generate_mail(self): 16 event = self.kwargs["event"] 17 18 return self._delay_mail( 19 to_email=self.user.email, 20 context={"event": event.title, "name": self.user.full_name, "id": event.id}, 21 subject=f"Du er flyttet opp fra ventelisten på arrangementet {event.title}", 22 plain_template="events/email/bump.txt", 23 html_template="events/email/bump.html", 24 ) 25 26 def generate_push(self): 27 event = self.kwargs["event"] 28 29 return self._delay_push( 30 template="events/push/bump.txt", 31 context={"event": event.title}, 32 instance=event, 33 ) 34 35 36 class EventPaymentOverdueNotification(Notification): 37 38 name = EVENT_PAYMENT_OVERDUE 39 40 def generate_mail(self): 41 event = self.kwargs["event"] 42 43 return self._delay_mail( 44 to_email=self.user.email, 45 context={ 46 "event": event.title, 47 "name": self.user.full_name, 48 "due_date": event.payment_due_date, 49 "id": event.id, 50 }, 51 subject=f"Du har ikke betalt påmeldingen på arrangementet {event.title}", 52 plain_template="events/email/payment_overdue.txt", 53 html_template="events/email/payment_overdue.html", 54 ) 55 56 def generate_push(self): 57 event = self.kwargs["event"] 58 59 return self._delay_push( 60 template="events/push/payment_overdue.txt", 61 context={"event": event.title}, 62 instance=event, 63 ) 64 65 66 class EventPaymentOverdueCreatorNotification(Notification): 67 68 name = EVENT_PAYMENT_OVERDUE_CREATOR 69 70 def generate_mail(self): 71 event = self.kwargs["event"] 72 users = self.kwargs["users"] 73 74 return self._delay_mail( 75 to_email=self.user.email, 76 context={ 77 "event": event.title, 78 "users": users, 79 "name": self.user.full_name, 80 "id": event.id, 81 }, 82 subject=f"Følgende registrerte har ikke betalt påmeldingen til arrangementet" 83 f" {event.title}", 84 plain_template="events/email/payment_overdue_author.txt", 85 html_template="events/email/payment_overdue_author.html", 86 ) 87 88 89 class EventAdminRegistrationNotification(Notification): 90 91 name = EVENT_ADMIN_REGISTRATION 92 93 def generate_mail(self): 94 event = self.kwargs["event"] 95 reason = self.kwargs["reason"] 96 97 return self._delay_mail( 98 to_email=self.user.email, 99 context={ 100 "event": event.title, 101 "name": self.user.full_name, 102 "reason": reason, 103 "id": event.id, 104 }, 105 subject=f"Du har blitt adminpåmeldt på arrangementet {event.title}", 106 plain_template="events/email/admin_registration.txt", 107 html_template="events/email/admin_registration.html", 108 ) 109 110 def generate_push(self): 111 event = self.kwargs["event"] 112 113 return self._delay_push( 114 template="events/push/admin_registration.txt", 115 context={"event": event.title}, 
116 instance=event, 117 ) 118 119 120 class EventAdminUnregistrationNotification(Notification): 121 122 name = EVENT_ADMIN_UNREGISTRATION 123 124 def generate_mail(self): 125 event = self.kwargs["event"] 126 creator = self.kwargs["creator"] 127 reason = self.kwargs["reason"] 128 129 return self._delay_mail( 130 to_email=self.user.email, 131 context={ 132 "event": event.title, 133 "creator_name": creator.full_name, 134 "creator_email": creator.email, 135 "name": self.user.full_name, 136 "reason": reason, 137 "id": event.id, 138 }, 139 subject=f"Du har blitt fjernet fra arrangementet {event.title}", 140 plain_template="events/email/admin_unregistration.txt", 141 html_template="events/email/admin_unregistration.html", 142 ) 143 144 def generate_push(self): 145 event = self.kwargs["event"] 146 147 return self._delay_push( 148 template="events/push/admin_unregistration.txt", 149 context={"event": event.title}, 150 instance=event, 151 ) 152 [end of lego/apps/events/notifications.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lego/apps/events/notifications.py b/lego/apps/events/notifications.py --- a/lego/apps/events/notifications.py +++ b/lego/apps/events/notifications.py @@ -1,3 +1,7 @@ +from django.utils import timezone + +import pytz + from lego.apps.notifications.constants import ( EVENT_ADMIN_REGISTRATION, EVENT_ADMIN_UNREGISTRATION, @@ -40,12 +44,18 @@ def generate_mail(self): event = self.kwargs["event"] + date = timezone.localtime( + value=event.payment_due_date, timezone=pytz.timezone("Europe/Oslo") + ) + + due_date = date.strftime("%d.%m.%y, kl. %H:%M") + return self._delay_mail( to_email=self.user.email, context={ "event": event.title, "name": self.user.full_name, - "due_date": event.payment_due_date, + "due_date": due_date, "id": event.id, }, subject=f"Du har ikke betalt påmeldingen på arrangementet {event.title}",
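To illustrate the formatting the diff introduces, a standalone sketch with a made-up timestamp (only the strftime pattern and timezone come from the patch):

```python
# Reproduces the Oslo-localised due-date string used in the email context.
from datetime import datetime, timezone

import pytz

due = datetime(2017, 12, 19, 9, 39, tzinfo=timezone.utc)  # hypothetical payment_due_date
local = due.astimezone(pytz.timezone("Europe/Oslo"))
print(local.strftime("%d.%m.%y, kl. %H:%M"))  # -> "19.12.17, kl. 10:39" (CET, UTC+1)
```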
{"golden_diff": "diff --git a/lego/apps/events/notifications.py b/lego/apps/events/notifications.py\n--- a/lego/apps/events/notifications.py\n+++ b/lego/apps/events/notifications.py\n@@ -1,3 +1,7 @@\n+from django.utils import timezone\n+\n+import pytz\n+\n from lego.apps.notifications.constants import (\n EVENT_ADMIN_REGISTRATION,\n EVENT_ADMIN_UNREGISTRATION,\n@@ -40,12 +44,18 @@\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n \n+ date = timezone.localtime(\n+ value=event.payment_due_date, timezone=pytz.timezone(\"Europe/Oslo\")\n+ )\n+\n+ due_date = date.strftime(\"%d.%m.%y, kl. %H:%M\")\n+\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n- \"due_date\": event.payment_due_date,\n+ \"due_date\": due_date,\n \"id\": event.id,\n },\n subject=f\"Du har ikke betalt p\u00e5meldingen p\u00e5 arrangementet {event.title}\",\n", "issue": "Timezone email\nFormat dates in emails in the same language as the email template (Norwegian), and converted to the proper timezone. \r\n\r\n![image](https://user-images.githubusercontent.com/1467188/34150647-476ffb12-e4a9-11e7-9f17-5b83e0690dad.png)\r\n\r\n![screenshot 19 des 1 2017 10_39_27](https://user-images.githubusercontent.com/1467188/34150870-f9c33536-e4a9-11e7-836e-a3e93ae27f0f.png)\r\n\r\n\n", "before_files": [{"content": "from lego.apps.notifications.constants import (\n EVENT_ADMIN_REGISTRATION,\n EVENT_ADMIN_UNREGISTRATION,\n EVENT_BUMP,\n EVENT_PAYMENT_OVERDUE,\n EVENT_PAYMENT_OVERDUE_CREATOR,\n)\nfrom lego.apps.notifications.notification import Notification\n\n\nclass EventBumpNotification(Notification):\n\n name = EVENT_BUMP\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\"event\": event.title, \"name\": self.user.full_name, \"id\": event.id},\n subject=f\"Du er flyttet opp fra ventelisten p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/bump.txt\",\n html_template=\"events/email/bump.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/bump.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"due_date\": event.payment_due_date,\n \"id\": event.id,\n },\n subject=f\"Du har ikke betalt p\u00e5meldingen p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/payment_overdue.txt\",\n html_template=\"events/email/payment_overdue.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/payment_overdue.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueCreatorNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE_CREATOR\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n users = self.kwargs[\"users\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"users\": users,\n \"name\": self.user.full_name,\n \"id\": event.id,\n },\n subject=f\"F\u00f8lgende registrerte har ikke betalt p\u00e5meldingen til arrangementet\"\n f\" {event.title}\",\n plain_template=\"events/email/payment_overdue_author.txt\",\n 
html_template=\"events/email/payment_overdue_author.html\",\n )\n\n\nclass EventAdminRegistrationNotification(Notification):\n\n name = EVENT_ADMIN_REGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n reason = self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt adminp\u00e5meldt p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/admin_registration.txt\",\n html_template=\"events/email/admin_registration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_registration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventAdminUnregistrationNotification(Notification):\n\n name = EVENT_ADMIN_UNREGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n creator = self.kwargs[\"creator\"]\n reason = self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"creator_name\": creator.full_name,\n \"creator_email\": creator.email,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt fjernet fra arrangementet {event.title}\",\n plain_template=\"events/email/admin_unregistration.txt\",\n html_template=\"events/email/admin_unregistration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_unregistration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n", "path": "lego/apps/events/notifications.py"}]}
num_tokens_prompt: 1,983
num_tokens_diff: 252

problem_id: gh_patches_debug_30420
source: rasdani/github-patches
task_type: git_diff
in_source_id: PrefectHQ__prefect-347
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add back `environment.yml` file I realized why we might want to maintain an `environment.yml` file in parallel with our `requirements.txt` file: `requirements.txt` will be installed via `pip`, whereas if you create an environment via `conda`, the packages will be installed / maintained via `conda`. This can be useful for those who try to `conda install` everything (since it has different package version logic + handles non-python dependencies). </issue> <code> [start of setup.py] 1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula 2 3 from setuptools import find_packages, setup 4 5 import sys 6 import versioneer 7 8 install_requires = [ 9 "click >= 6.7, < 7.0", 10 "cloudpickle >= 0.6.0", 11 "croniter >= 0.3.23, < 0.4", 12 "cryptography >= 2.2.2, < 3.0", 13 "dask >= 0.18, < 0.19", 14 "distributed >= 1.21.8, < 2.0", 15 "docker >= 3.4.1, < 3.5", 16 "marshmallow == 3.0.0b19", 17 "marshmallow-oneofschema >= 2.0.0b2, < 3.0", 18 "mypy >= 0.600, < 0.700", 19 "mypy_extensions >= 0.4.0, < 0.5", 20 "pendulum >= 2.0.4, < 3.0", 21 "python-dateutil >= 2.7.3, < 3.0", 22 "requests >= 2.20, < 3.0", 23 "toml >= 0.9.4, < 1.0", 24 "typing >= 3.6.4, < 4.0", 25 "typing_extensions >= 3.6.4, < 4.0", 26 "xxhash >= 1.2.0, < 2.0", 27 ] 28 29 templates = ["jinja2 >= 2.0, < 3.0"] 30 viz = ["bokeh == 0.13.0", "graphviz >= 0.8.3"] 31 dev = [ 32 "pre-commit", 33 "pytest >= 3.8, < 4.0", 34 "pytest-cov", 35 "pytest-env", 36 "pytest-xdist", 37 "Pygments == 2.2.0", 38 ] 39 40 if sys.version_info >= (3, 6): 41 dev += ["black"] 42 43 extras = { 44 "dev": dev + viz, 45 "viz": viz, 46 "templates": templates, 47 "all_extras": dev + templates + viz, 48 } 49 50 setup( 51 name="prefect", 52 version=versioneer.get_version(), 53 cmdclass=versioneer.get_cmdclass(), 54 description="", 55 long_description=open("README.md").read(), 56 url="https://www.github.com/prefecthq/prefect", 57 author="Prefect Technologies, Inc.", 58 author_email="[email protected]", 59 install_requires=install_requires, 60 extras_require=extras, 61 scripts=[], 62 packages=find_packages(where="src"), 63 package_dir={"": "src"}, 64 include_package_data=True, 65 entry_points={"console_scripts": ["prefect=prefect.cli:cli"]}, 66 ) 67 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -2,51 +2,39 @@ from setuptools import find_packages, setup +import configparser import sys import versioneer -install_requires = [ - "click >= 6.7, < 7.0", - "cloudpickle >= 0.6.0", - "croniter >= 0.3.23, < 0.4", - "cryptography >= 2.2.2, < 3.0", - "dask >= 0.18, < 0.19", - "distributed >= 1.21.8, < 2.0", - "docker >= 3.4.1, < 3.5", - "marshmallow == 3.0.0b19", - "marshmallow-oneofschema >= 2.0.0b2, < 3.0", - "mypy >= 0.600, < 0.700", - "mypy_extensions >= 0.4.0, < 0.5", - "pendulum >= 2.0.4, < 3.0", - "python-dateutil >= 2.7.3, < 3.0", - "requests >= 2.20, < 3.0", - "toml >= 0.9.4, < 1.0", - "typing >= 3.6.4, < 4.0", - "typing_extensions >= 3.6.4, < 4.0", - "xxhash >= 1.2.0, < 2.0", -] +config = configparser.ConfigParser() +config.read("requirements.ini") -templates = ["jinja2 >= 2.0, < 3.0"] -viz = ["bokeh == 0.13.0", "graphviz >= 0.8.3"] -dev = [ - "pre-commit", - "pytest >= 3.8, < 4.0", - "pytest-cov", - "pytest-env", - "pytest-xdist", - "Pygments == 2.2.0", -] +## base requirements +install_requires = ["".join(req) for req in config["base"].items()] -if sys.version_info >= (3, 6): - dev += ["black"] +## section dependencies +includes = {} +for section in config.sections(): + includes[section] = config[section].pop("include", "").split(",") extras = { - "dev": dev + viz, - "viz": viz, - "templates": templates, - "all_extras": dev + templates + viz, + "dev": ["".join(req) for req in config["dev"].items()], + "viz": ["".join(req) for req in config["viz"].items()], + "templates": ["".join(req) for req in config["templates"].items()], } +## process include keyword for related sections +for section in extras: + for other in includes[section]: + extras[section] += extras.get(other.strip(), []) + + +if sys.version_info >= (3, 6): + extras["dev"] += ["black"] + +extras["all_extras"] = extras["dev"] + extras["viz"] + extras["templates"] + + setup( name="prefect", version=versioneer.get_version(),
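The diff reads a `requirements.ini` whose layout is not shown in the record; a plausible sketch consistent with the parsing code (section contents are invented):

```python
# Demonstrates how the new setup.py turns ini sections into pip requirement
# strings: each "name = specifier" pair is concatenated, and an optional
# "include" key pulls in another section's requirements.
import configparser

sample = """
[base]
click = >= 6.7, < 7.0

[viz]
bokeh = == 0.13.0

[dev]
include = viz
pytest = >= 3.8, < 4.0
"""

config = configparser.ConfigParser()
config.read_string(sample)
print(["".join(req) for req in config["base"].items()])  # ['click>= 6.7, < 7.0']
```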
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,51 +2,39 @@\n \n from setuptools import find_packages, setup\n \n+import configparser\n import sys\n import versioneer\n \n-install_requires = [\n- \"click >= 6.7, < 7.0\",\n- \"cloudpickle >= 0.6.0\",\n- \"croniter >= 0.3.23, < 0.4\",\n- \"cryptography >= 2.2.2, < 3.0\",\n- \"dask >= 0.18, < 0.19\",\n- \"distributed >= 1.21.8, < 2.0\",\n- \"docker >= 3.4.1, < 3.5\",\n- \"marshmallow == 3.0.0b19\",\n- \"marshmallow-oneofschema >= 2.0.0b2, < 3.0\",\n- \"mypy >= 0.600, < 0.700\",\n- \"mypy_extensions >= 0.4.0, < 0.5\",\n- \"pendulum >= 2.0.4, < 3.0\",\n- \"python-dateutil >= 2.7.3, < 3.0\",\n- \"requests >= 2.20, < 3.0\",\n- \"toml >= 0.9.4, < 1.0\",\n- \"typing >= 3.6.4, < 4.0\",\n- \"typing_extensions >= 3.6.4, < 4.0\",\n- \"xxhash >= 1.2.0, < 2.0\",\n-]\n+config = configparser.ConfigParser()\n+config.read(\"requirements.ini\")\n \n-templates = [\"jinja2 >= 2.0, < 3.0\"]\n-viz = [\"bokeh == 0.13.0\", \"graphviz >= 0.8.3\"]\n-dev = [\n- \"pre-commit\",\n- \"pytest >= 3.8, < 4.0\",\n- \"pytest-cov\",\n- \"pytest-env\",\n- \"pytest-xdist\",\n- \"Pygments == 2.2.0\",\n-]\n+## base requirements\n+install_requires = [\"\".join(req) for req in config[\"base\"].items()]\n \n-if sys.version_info >= (3, 6):\n- dev += [\"black\"]\n+## section dependencies\n+includes = {}\n+for section in config.sections():\n+ includes[section] = config[section].pop(\"include\", \"\").split(\",\")\n \n extras = {\n- \"dev\": dev + viz,\n- \"viz\": viz,\n- \"templates\": templates,\n- \"all_extras\": dev + templates + viz,\n+ \"dev\": [\"\".join(req) for req in config[\"dev\"].items()],\n+ \"viz\": [\"\".join(req) for req in config[\"viz\"].items()],\n+ \"templates\": [\"\".join(req) for req in config[\"templates\"].items()],\n }\n \n+## process include keyword for related sections\n+for section in extras:\n+ for other in includes[section]:\n+ extras[section] += extras.get(other.strip(), [])\n+\n+\n+if sys.version_info >= (3, 6):\n+ extras[\"dev\"] += [\"black\"]\n+\n+extras[\"all_extras\"] = extras[\"dev\"] + extras[\"viz\"] + extras[\"templates\"]\n+\n+\n setup(\n name=\"prefect\",\n version=versioneer.get_version(),\n", "issue": "Add back `environment.yml` file\nI realized why we might want to maintain an `environment.yml` file in parallel with our `requirements.txt` file: `requirements.txt` will be installed via `pip`, whereas if you create an environment via `conda`, the packages will be installed / maintained via `conda`. 
This can be useful for those who try to `conda install` everything (since it has different package version logic + handles non-python dependencies).\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom setuptools import find_packages, setup\n\nimport sys\nimport versioneer\n\ninstall_requires = [\n \"click >= 6.7, < 7.0\",\n \"cloudpickle >= 0.6.0\",\n \"croniter >= 0.3.23, < 0.4\",\n \"cryptography >= 2.2.2, < 3.0\",\n \"dask >= 0.18, < 0.19\",\n \"distributed >= 1.21.8, < 2.0\",\n \"docker >= 3.4.1, < 3.5\",\n \"marshmallow == 3.0.0b19\",\n \"marshmallow-oneofschema >= 2.0.0b2, < 3.0\",\n \"mypy >= 0.600, < 0.700\",\n \"mypy_extensions >= 0.4.0, < 0.5\",\n \"pendulum >= 2.0.4, < 3.0\",\n \"python-dateutil >= 2.7.3, < 3.0\",\n \"requests >= 2.20, < 3.0\",\n \"toml >= 0.9.4, < 1.0\",\n \"typing >= 3.6.4, < 4.0\",\n \"typing_extensions >= 3.6.4, < 4.0\",\n \"xxhash >= 1.2.0, < 2.0\",\n]\n\ntemplates = [\"jinja2 >= 2.0, < 3.0\"]\nviz = [\"bokeh == 0.13.0\", \"graphviz >= 0.8.3\"]\ndev = [\n \"pre-commit\",\n \"pytest >= 3.8, < 4.0\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-xdist\",\n \"Pygments == 2.2.0\",\n]\n\nif sys.version_info >= (3, 6):\n dev += [\"black\"]\n\nextras = {\n \"dev\": dev + viz,\n \"viz\": viz,\n \"templates\": templates,\n \"all_extras\": dev + templates + viz,\n}\n\nsetup(\n name=\"prefect\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"\",\n long_description=open(\"README.md\").read(),\n url=\"https://www.github.com/prefecthq/prefect\",\n author=\"Prefect Technologies, Inc.\",\n author_email=\"[email protected]\",\n install_requires=install_requires,\n extras_require=extras,\n scripts=[],\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n entry_points={\"console_scripts\": [\"prefect=prefect.cli:cli\"]},\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,396
num_tokens_diff: 788

problem_id: gh_patches_debug_13957
source: rasdani/github-patches
task_type: git_diff
in_source_id: opendatacube__datacube-core-680
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
Errors when running against the impending sqlalchemy 1.3 release (in beta)
Originally reported in #667

Datacube consistently fails when run against the current beta version of sqlalchemy. [According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs this release "1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned."

This isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.

Manually reproduce the error with:

```
 pip install sqlalchemy==1.3b3
 datacube system init
```

- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.
- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.
</issue>
<code>
[start of datacube/drivers/postgres/sql.py]
1 # coding=utf-8
2 """
3 Custom types for postgres & sqlalchemy
4 """
5
6 from sqlalchemy import TIMESTAMP
7 from sqlalchemy.dialects.postgresql.ranges import RangeOperators
8 from sqlalchemy.ext.compiler import compiles
9 from sqlalchemy.sql import sqltypes
10 from sqlalchemy.sql.expression import Executable, ClauseElement
11 from sqlalchemy.sql.functions import GenericFunction
12
13 SCHEMA_NAME = 'agdc'
14
15
16 class CreateView(Executable, ClauseElement):
17     def __init__(self, name, select):
18         self.name = name
19         self.select = select
20
21
22 @compiles(CreateView)
23 def visit_create_view(element, compiler, **kw):
24     return "CREATE VIEW %s AS %s" % (
25         element.name,
26         compiler.process(element.select, literal_binds=True)
27     )
28
29
30 TYPES_INIT_SQL = """
31 create or replace function {schema}.common_timestamp(text)
32 returns timestamp with time zone as $$
33 select ($1)::timestamp at time zone 'utc';
34 $$ language sql immutable returns null on null input;
35
36 create type {schema}.float8range as range (
37     subtype = float8,
38     subtype_diff = float8mi
39 );
40 """.format(schema=SCHEMA_NAME)
41
42
43 # pylint: disable=abstract-method
44 class FLOAT8RANGE(RangeOperators, sqltypes.TypeEngine):
45     __visit_name__ = 'FLOAT8RANGE'
46
47
48 @compiles(FLOAT8RANGE)
49 def visit_float8range(element, compiler, **kw):
50     return "FLOAT8RANGE"
51
52
53 # Register the function with SQLAlchemhy.
54 # pylint: disable=too-many-ancestors 55 class CommonTimestamp(GenericFunction): 56 type = TIMESTAMP(timezone=True) 57 package = 'agdc' 58 identifier = 'common_timestamp' 59 60 name = '%s.common_timestamp' % SCHEMA_NAME 61 62 63 # pylint: disable=too-many-ancestors 64 class Float8Range(GenericFunction): 65 type = FLOAT8RANGE 66 package = 'agdc' 67 identifier = 'float8range' 68 69 name = '%s.float8range' % SCHEMA_NAME 70 71 72 class PGNAME(sqltypes.Text): 73 """Postgres 'NAME' type.""" 74 __visit_name__ = 'NAME' 75 76 77 @compiles(PGNAME) 78 def visit_name(element, compiler, **kw): 79 return "NAME" 80 81 82 def pg_exists(conn, name): 83 """ 84 Does a postgres object exist? 85 :rtype bool 86 """ 87 return conn.execute("SELECT to_regclass(%s)", name).scalar() is not None 88 89 90 def pg_column_exists(conn, table, column): 91 """ 92 Does a postgres object exist? 93 :rtype bool 94 """ 95 return conn.execute(""" 96 SELECT 1 FROM pg_attribute 97 WHERE attrelid = to_regclass(%s) 98 AND attname = %s 99 AND NOT attisdropped 100 """, table, column).scalar() is not None 101 102 103 def escape_pg_identifier(engine, name): 104 """ 105 Escape identifiers (tables, fields, roles, etc) for inclusion in SQL statements. 106 107 psycopg2 can safely merge query arguments, but cannot do the same for dynamically 108 generating queries. 109 110 See http://initd.org/psycopg/docs/sql.html for more information. 111 """ 112 # New (2.7+) versions of psycopg2 have function: extensions.quote_ident() 113 # But it's too bleeding edge right now. We'll ask the server to escape instead, as 114 # these are not performance sensitive. 115 return engine.execute("select quote_ident(%s)", name).scalar() 116 [end of datacube/drivers/postgres/sql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/datacube/drivers/postgres/sql.py b/datacube/drivers/postgres/sql.py --- a/datacube/drivers/postgres/sql.py +++ b/datacube/drivers/postgres/sql.py @@ -57,7 +57,11 @@ package = 'agdc' identifier = 'common_timestamp' - name = '%s.common_timestamp' % SCHEMA_NAME + name = 'common_timestamp' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.packagenames = ['%s' % SCHEMA_NAME] # pylint: disable=too-many-ancestors @@ -66,7 +70,11 @@ package = 'agdc' identifier = 'float8range' - name = '%s.float8range' % SCHEMA_NAME + name = 'float8range' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.packagenames = ['%s' % SCHEMA_NAME] class PGNAME(sqltypes.Text):
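A sketch of what moving the schema into `packagenames` changes, assuming the patched classes are imported; the rendered SQL shown is approximate:

```python
# With name='common_timestamp' and packagenames=['agdc'], SQLAlchemy 1.3
# renders a schema-qualified call instead of quoting the dotted name as
# a single identifier, and the TIMESTAMP return type is preserved.
from sqlalchemy import func

import datacube.drivers.postgres.sql  # noqa: F401  (registers the functions)

expr = func.agdc.common_timestamp("2019-02-08T00:00:00")
print(expr)       # agdc.common_timestamp(:common_timestamp_1)  (approximately)
print(expr.type)  # TIMESTAMP
```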
{"golden_diff": "diff --git a/datacube/drivers/postgres/sql.py b/datacube/drivers/postgres/sql.py\n--- a/datacube/drivers/postgres/sql.py\n+++ b/datacube/drivers/postgres/sql.py\n@@ -57,7 +57,11 @@\n package = 'agdc'\n identifier = 'common_timestamp'\n \n- name = '%s.common_timestamp' % SCHEMA_NAME\n+ name = 'common_timestamp'\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.packagenames = ['%s' % SCHEMA_NAME]\n \n \n # pylint: disable=too-many-ancestors\n@@ -66,7 +70,11 @@\n package = 'agdc'\n identifier = 'float8range'\n \n- name = '%s.float8range' % SCHEMA_NAME\n+ name = 'float8range'\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.packagenames = ['%s' % SCHEMA_NAME]\n \n \n class PGNAME(sqltypes.Text):\n", "issue": "Errors when running against the impending sqlalchemy 1.3 release (in beta)\nOriginally reported in #667\r\n\r\nDatacube consistently fails when run against the current beta version of sqlalchemy. [According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs this release \"1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned.\"\r\n\r\nThis isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.\r\n\r\nManually reproduce the error with:\r\n\r\n```\r\n pip install sqlalchemy==1.3b3\r\n datacube system init\r\n```\r\n\r\n- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.\r\n- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.\r\n\nErrors when running against the impending sqlalchemy 1.3 release (in beta)\nOriginally reported in #667\r\n\r\nDatacube consistently fails when run against the current beta version of sqlalchemy. 
[According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs this release \"1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned.\"\r\n\r\nThis isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.\r\n\r\nManually reproduce the error with:\r\n\r\n```\r\n pip install sqlalchemy==1.3b3\r\n datacube system init\r\n```\r\n\r\n- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.\r\n- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nCustom types for postgres & sqlalchemy\n\"\"\"\n\nfrom sqlalchemy import TIMESTAMP\nfrom sqlalchemy.dialects.postgresql.ranges import RangeOperators\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.sql import sqltypes\nfrom sqlalchemy.sql.expression import Executable, ClauseElement\nfrom sqlalchemy.sql.functions import GenericFunction\n\nSCHEMA_NAME = 'agdc'\n\n\nclass CreateView(Executable, ClauseElement):\n def __init__(self, name, select):\n self.name = name\n self.select = select\n\n\n@compiles(CreateView)\ndef visit_create_view(element, compiler, **kw):\n return \"CREATE VIEW %s AS %s\" % (\n element.name,\n compiler.process(element.select, literal_binds=True)\n )\n\n\nTYPES_INIT_SQL = \"\"\"\ncreate or replace function {schema}.common_timestamp(text)\nreturns timestamp with time zone as $$\nselect ($1)::timestamp at time zone 'utc';\n$$ language sql immutable returns null on null input;\n\ncreate type {schema}.float8range as range (\n subtype = float8,\n subtype_diff = float8mi\n);\n\"\"\".format(schema=SCHEMA_NAME)\n\n\n# pylint: disable=abstract-method\nclass FLOAT8RANGE(RangeOperators, sqltypes.TypeEngine):\n __visit_name__ = 'FLOAT8RANGE'\n\n\n@compiles(FLOAT8RANGE)\ndef visit_float8range(element, compiler, **kw):\n return \"FLOAT8RANGE\"\n\n\n# Register the function with SQLAlchemhy.\n# pylint: disable=too-many-ancestors\nclass CommonTimestamp(GenericFunction):\n type = TIMESTAMP(timezone=True)\n package = 'agdc'\n identifier = 'common_timestamp'\n\n name = '%s.common_timestamp' % SCHEMA_NAME\n\n\n# pylint: disable=too-many-ancestors\nclass Float8Range(GenericFunction):\n type = FLOAT8RANGE\n package = 'agdc'\n identifier = 'float8range'\n\n name = '%s.float8range' % SCHEMA_NAME\n\n\nclass PGNAME(sqltypes.Text):\n \"\"\"Postgres 'NAME' type.\"\"\"\n __visit_name__ = 'NAME'\n\n\n@compiles(PGNAME)\ndef visit_name(element, compiler, **kw):\n return \"NAME\"\n\n\ndef pg_exists(conn, name):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"SELECT to_regclass(%s)\", name).scalar() is not None\n\n\ndef pg_column_exists(conn, table, column):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"\"\"\n SELECT 1 FROM pg_attribute\n WHERE attrelid = to_regclass(%s)\n AND attname = %s\n AND NOT attisdropped\n \"\"\", table, column).scalar() is not None\n\n\ndef escape_pg_identifier(engine, name):\n \"\"\"\n Escape identifiers (tables, fields, roles, etc) for inclusion in SQL statements.\n\n psycopg2 can safely merge query arguments, but cannot do the same for dynamically\n generating queries.\n\n See http://initd.org/psycopg/docs/sql.html for more information.\n \"\"\"\n # New (2.7+) versions of psycopg2 have function: extensions.quote_ident()\n # But it's too bleeding edge right now. 
We'll ask the server to escape instead, as\n # these are not performance sensitive.\n return engine.execute(\"select quote_ident(%s)\", name).scalar()\n", "path": "datacube/drivers/postgres/sql.py"}]}
num_tokens_prompt: 1,945
num_tokens_diff: 249

problem_id: gh_patches_debug_1473
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-13177
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
tril_indices_from
</issue>
<code>
[start of ivy/functional/frontends/jax/numpy/indexing.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import (
4     to_ivy_arrays_and_back,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def diagonal(a, offset=0, axis1=0, axis2=1):
10     return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
11
12
13 @to_ivy_arrays_and_back
14 def diag(v, k=0):
15     return ivy.diag(v, k=k)
16
17
18 @to_ivy_arrays_and_back
19 def diag_indices(n, ndim=2):
20     idx = ivy.arange(n, dtype=int)
21     return (idx,) * ndim
22
23
24 # take_along_axis
25 @to_ivy_arrays_and_back
26 def take_along_axis(arr, indices, axis, mode="fill"):
27     return ivy.take_along_axis(arr, indices, axis, mode=mode)
28
29
30 @to_ivy_arrays_and_back
31 def tril_indices(n_rows, n_cols=None, k=0):
32     return ivy.tril_indices(n_rows, n_cols, k)
[end of ivy/functional/frontends/jax/numpy/indexing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py --- a/ivy/functional/frontends/jax/numpy/indexing.py +++ b/ivy/functional/frontends/jax/numpy/indexing.py @@ -30,3 +30,8 @@ @to_ivy_arrays_and_back def tril_indices(n_rows, n_cols=None, k=0): return ivy.tril_indices(n_rows, n_cols, k) + + +@to_ivy_arrays_and_back +def tril_indices_from(arr, k=0): + return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
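A usage sketch of the new frontend function, assuming the patch above; the import path is inferred from the file layout:

```python
# tril_indices_from mirrors numpy: lower-triangle indices sized to `arr`.
import ivy
from ivy.functional.frontends.jax.numpy import tril_indices_from

arr = ivy.ones((4, 4))
rows, cols = tril_indices_from(arr, k=0)  # same as ivy.tril_indices(4, 4, 0)
```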
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py\n--- a/ivy/functional/frontends/jax/numpy/indexing.py\n+++ b/ivy/functional/frontends/jax/numpy/indexing.py\n@@ -30,3 +30,8 @@\n @to_ivy_arrays_and_back\n def tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n+\n+\n+@to_ivy_arrays_and_back\n+def tril_indices_from(arr, k=0):\n+ return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n", "issue": "tril_indces_from\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py"}]}
num_tokens_prompt: 852
num_tokens_diff: 158

problem_id: gh_patches_debug_336
source: rasdani/github-patches
task_type: git_diff
in_source_id: piskvorky__gensim-919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> import gensim fails since updating to Xcode 7.3 I just updated my version of Xcode to 7.3. When I run `pip install --upgrade gensim` the process completed without any issues. However, when I try `import gensim` within the python shell the terminal barfs a bunch of C++ output with a block of execution errors that begins with: `Exception: Compilation failed (return status=1): clang: error: unsupported option '-b mi2'. clang: error: unsupported option '-b mi'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-sse4a'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-tbm'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'....` I think this has something to do with where gensim is looking for its header files, but I'm somewhat at a loss. Any help debugging would be greatly appreciated. </issue> <code> [start of gensim/corpora/__init__.py] 1 """ 2 This package contains implementations of various streaming corpus I/O format. 3 """ 4 5 # bring corpus classes directly into package namespace, to save some typing 6 from .indexedcorpus import IndexedCorpus # must appear before the other classes 7 8 from .mmcorpus import MmCorpus 9 from .bleicorpus import BleiCorpus 10 from .svmlightcorpus import SvmLightCorpus 11 from .lowcorpus import LowCorpus 12 from .dictionary import Dictionary 13 from .hashdictionary import HashDictionary 14 from .wikicorpus import WikiCorpus 15 from .textcorpus import TextCorpus 16 from .ucicorpus import UciCorpus 17 from .malletcorpus import MalletCorpus 18 from .sharded_corpus import ShardedCorpus 19 [end of gensim/corpora/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gensim/corpora/__init__.py b/gensim/corpora/__init__.py --- a/gensim/corpora/__init__.py +++ b/gensim/corpora/__init__.py @@ -15,4 +15,3 @@ from .textcorpus import TextCorpus from .ucicorpus import UciCorpus from .malletcorpus import MalletCorpus -from .sharded_corpus import ShardedCorpus
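The removed re-export has a user-visible consequence worth noting, sketched here under the assumption the patch is applied:

```python
# `import gensim` no longer pulls in the sharded-corpus import chain that
# triggered native compilation (the clang errors above) at import time.
import gensim  # succeeds

# ShardedCorpus is still available, but must now be imported explicitly:
from gensim.corpora.sharded_corpus import ShardedCorpus
```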
{"golden_diff": "diff --git a/gensim/corpora/__init__.py b/gensim/corpora/__init__.py\n--- a/gensim/corpora/__init__.py\n+++ b/gensim/corpora/__init__.py\n@@ -15,4 +15,3 @@\n from .textcorpus import TextCorpus\n from .ucicorpus import UciCorpus\n from .malletcorpus import MalletCorpus\n-from .sharded_corpus import ShardedCorpus\n", "issue": "import gensim fails since updating to Xcode 7.3 \nI just updated my version of Xcode to 7.3. When I run `pip install --upgrade gensim` the process completed without any issues. However, when I try `import gensim` within the python shell the terminal barfs a bunch of C++ output with a block of execution errors that begins with: \n\n`Exception: Compilation failed (return status=1): clang: error: unsupported option '-b mi2'. clang: error: unsupported option '-b mi'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-sse4a'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-tbm'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'....`\n\nI think this has something to do with where gensim is looking for its header files, but I'm somewhat at a loss. Any help debugging would be greatly appreciated. \n\n", "before_files": [{"content": "\"\"\"\nThis package contains implementations of various streaming corpus I/O format.\n\"\"\"\n\n# bring corpus classes directly into package namespace, to save some typing\nfrom .indexedcorpus import IndexedCorpus # must appear before the other classes\n\nfrom .mmcorpus import MmCorpus\nfrom .bleicorpus import BleiCorpus\nfrom .svmlightcorpus import SvmLightCorpus\nfrom .lowcorpus import LowCorpus\nfrom .dictionary import Dictionary\nfrom .hashdictionary import HashDictionary\nfrom .wikicorpus import WikiCorpus\nfrom .textcorpus import TextCorpus\nfrom .ucicorpus import UciCorpus\nfrom .malletcorpus import MalletCorpus\nfrom .sharded_corpus import ShardedCorpus\n", "path": "gensim/corpora/__init__.py"}]}
num_tokens_prompt: 976
num_tokens_diff: 110

problem_id: gh_patches_debug_1358
source: rasdani/github-patches
task_type: git_diff
in_source_id: mirumee__ariadne-270
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade to GraphQL-core v3 I'm getting the following deprecation warning. Is this something that is already on your radar / that you are planning to resolve for the next release? >**DeprecationWarning**: GraphQL-core-next has been discontinued. It is now released as GraphQL-core v3 and newer. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 import os 3 from setuptools import setup 4 5 CLASSIFIERS = [ 6 "Development Status :: 4 - Beta", 7 "Intended Audience :: Developers", 8 "License :: OSI Approved :: BSD License", 9 "Operating System :: OS Independent", 10 "Programming Language :: Python", 11 "Programming Language :: Python :: 3.6", 12 "Programming Language :: Python :: 3.7", 13 "Programming Language :: Python :: 3.8", 14 "Topic :: Software Development :: Libraries :: Python Modules", 15 ] 16 17 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md") 18 with open(README_PATH, "r") as f: 19 README = f.read() 20 21 setup( 22 name="ariadne", 23 author="Mirumee Software", 24 author_email="[email protected]", 25 description="Ariadne is a Python library for implementing GraphQL servers.", 26 long_description=README, 27 long_description_content_type="text/markdown", 28 license="BSD", 29 version="0.8.0", 30 url="https://github.com/mirumee/ariadne", 31 packages=["ariadne"], 32 include_package_data=True, 33 install_requires=[ 34 "graphql-core-next<3.0.0", 35 "starlette<0.14", 36 "typing_extensions>=3.6.0", 37 ], 38 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]}, 39 classifiers=CLASSIFIERS, 40 platforms=["any"], 41 zip_safe=False, 42 ) 43 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ packages=["ariadne"], include_package_data=True, install_requires=[ - "graphql-core-next<3.0.0", + "graphql-core>=3.0.0", "starlette<0.14", "typing_extensions>=3.6.0", ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n- \"graphql-core-next<3.0.0\",\n+ \"graphql-core>=3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n", "issue": "Upgrade to GraphQL-core v3\nI'm getting the following deprecation warning. Is this something that is already on your radar / that you are planning to resolve for the next release?\r\n\r\n>**DeprecationWarning**: GraphQL-core-next has been discontinued. It is now released as GraphQL-core v3 and newer.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.8.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next<3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,006
98
gh_patches_debug_50420
rasdani/github-patches
git_diff
litestar-org__litestar-2330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of litestar/openapi/spec/enums.py] 1 from enum import Enum 2 3 __all__ = ("OpenAPIFormat", "OpenAPIType") 4 5 6 class OpenAPIFormat(str, Enum): 7 """Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13""" 8 9 DATE = "date" 10 DATE_TIME = "date-time" 11 TIME = "time" 12 DURATION = "duration" 13 URL = "url" 14 EMAIL = "email" 15 IDN_EMAIL = "idn-email" 16 HOST_NAME = "hostname" 17 IDN_HOST_NAME = "idn-hostname" 18 IPV4 = "ipv4" 19 IPV6 = "ipv6" 20 URI = "uri" 21 URI_REFERENCE = "uri-reference" 22 URI_TEMPLATE = "uri-template" 23 JSON_POINTER = "json-pointer" 24 RELATIVE_JSON_POINTER = "relative-json-pointer" 25 IRI = "iri-reference" 26 IRI_REFERENCE = "iri-reference" # noqa: PIE796 27 UUID = "uuid" 28 REGEX = "regex" 29 30 31 class OpenAPIType(str, Enum): 32 """An OopenAPI type.""" 33 34 ARRAY = "array" 35 BOOLEAN = "boolean" 36 INTEGER = "integer" 37 NULL = "null" 38 NUMBER = "number" 39 OBJECT = "object" 40 STRING = "string" 41 [end of litestar/openapi/spec/enums.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/openapi/spec/enums.py b/litestar/openapi/spec/enums.py --- a/litestar/openapi/spec/enums.py +++ b/litestar/openapi/spec/enums.py @@ -26,6 +26,7 @@ IRI_REFERENCE = "iri-reference" # noqa: PIE796 UUID = "uuid" REGEX = "regex" + BINARY = "binary" class OpenAPIType(str, Enum):
{"golden_diff": "diff --git a/litestar/openapi/spec/enums.py b/litestar/openapi/spec/enums.py\n--- a/litestar/openapi/spec/enums.py\n+++ b/litestar/openapi/spec/enums.py\n@@ -26,6 +26,7 @@\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n+ BINARY = \"binary\"\n \n \n class OpenAPIType(str, Enum):\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from enum import Enum\n\n__all__ = (\"OpenAPIFormat\", \"OpenAPIType\")\n\n\nclass OpenAPIFormat(str, Enum):\n \"\"\"Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13\"\"\"\n\n DATE = \"date\"\n DATE_TIME = \"date-time\"\n TIME = \"time\"\n DURATION = \"duration\"\n URL = \"url\"\n EMAIL = \"email\"\n IDN_EMAIL = \"idn-email\"\n HOST_NAME = \"hostname\"\n IDN_HOST_NAME = \"idn-hostname\"\n IPV4 = \"ipv4\"\n IPV6 = \"ipv6\"\n URI = \"uri\"\n URI_REFERENCE = \"uri-reference\"\n URI_TEMPLATE = \"uri-template\"\n JSON_POINTER = \"json-pointer\"\n RELATIVE_JSON_POINTER = \"relative-json-pointer\"\n IRI = \"iri-reference\"\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n\n\nclass OpenAPIType(str, Enum):\n \"\"\"An OopenAPI type.\"\"\"\n\n ARRAY = \"array\"\n BOOLEAN = \"boolean\"\n INTEGER = \"integer\"\n NULL = \"null\"\n NUMBER = \"number\"\n OBJECT = \"object\"\n STRING = \"string\"\n", "path": "litestar/openapi/spec/enums.py"}]}
1,071
109
gh_patches_debug_19349
rasdani/github-patches
git_diff
fossasia__open-event-server-4248
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Microlocations : GET requests return ERROR 500
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server

Endpoint 
```
GET v1/events/<event_id>/microlocations 
```

Response
```
{
 "errors":[
 {
 "detail":"Unknown error",
 "source":{
 "pointer":""
 },
 "status":500,
 "title":"Unknown error"
 }
 ],
 "jsonapi":{
 "version":"1.0"
 }
}
```

Example URL
```
https://open-event-api.herokuapp.com/v1/events/173/microlocations
```

</issue>
<code>
[start of app/api/microlocations.py]
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4 
5 from app.api.bootstrap import api
6 from app.api.helpers.utilities import dasherize
7 from app.models import db
8 from app.models.microlocation import Microlocation
9 from app.models.session import Session
10 from app.api.helpers.db import safe_query
11 from app.api.helpers.utilities import require_relationship
12 from app.api.helpers.permission_manager import has_access
13 from app.api.helpers.exceptions import ForbiddenException
14 from app.api.helpers.query import event_query
15 
16 
17 class MicrolocationSchema(Schema):
18     """
19     Api schema for Microlocation Model
20     """
21 
22     class Meta:
23         """
24         Meta class for Microlocation Api Schema
25         """
26         type_ = 'microlocation'
27         self_view = 'v1.microlocation_detail'
28         self_view_kwargs = {'id': '<id>'}
29         self_view_many = 'v1.session_list'
30         inflect = dasherize
31 
32     id = fields.Str(dump_only=True)
33     name = fields.Str(required=True)
34     latitude = fields.Float(validate=lambda n: -90 <= n <= 90, allow_none=True)
35     longitude = fields.Float(validate=lambda n: -180 <= n <= 180, allow_none=True)
36     floor = fields.Integer(allow_none=True)
37     room = fields.Str(allow_none=True)
38     sessions = Relationship(attribute='session',
39                             self_view='v1.microlocation_session',
40                             self_view_kwargs={'id': '<id>'},
41                             related_view='v1.session_list',
42                             related_view_kwargs={'microlocation_id': '<id>'},
43                             schema='SessionSchema',
44                             type_='session')
45     event = Relationship(attribute='event',
46                          self_view='v1.microlocation_event',
47                          self_view_kwargs={'id': '<id>'},
48                          related_view='v1.event_detail',
49                          related_view_kwargs={'microlocation_id': '<id>'},
50                          schema='EventSchema',
51                          type_='event')
52 
53 
54 class MicrolocationListPost(ResourceList):
55     """
56     List and create microlocations
57     """
58     def before_post(self, args, kwargs, data):
59         require_relationship(['event'], data)
60         if not has_access('is_coorganizer', event_id=data['event']):
61             raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')
62 
63     methods = ['POST', ]
64     schema = MicrolocationSchema
65     data_layer = {'session': db.session,
66                   'model': Microlocation}
67 
68 
69 class MicrolocationList(ResourceList):
70     """
71     List Microlocations
72     """
73     def query(self, view_kwargs):
74         query_ = self.session.query(Microlocation)
75         query_ = event_query(self, query_, view_kwargs)
76         if view_kwargs.get('session_id'):
77             session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
78             query_ = query_.join(Session).filter(Session.id == session.id)
79         return query_
80 
81     view_kwargs = True
82     methods = ['GET']
83     schema = MicrolocationSchema
84     data_layer = {'session': db.session,
85                   'model': Microlocation,
86                   'methods': {
87                       'query': query
88                   }}
89 
90 
91 class MicrolocationDetail(ResourceDetail):
92     """
93     Microlocation detail by id
94     """
95 
96     def before_get_object(self, view_kwargs):
97 
98         if view_kwargs.get('session_id') is not None:
99             sessions = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
100             if sessions.event_id is not None:
101                 view_kwargs['id'] = sessions.event_id
102             else:
103                 view_kwargs['id'] = None
104 
105     decorators = (api.has_permission('is_coorganizer', methods="PATCH,DELETE", fetch="event_id", fetch_as="event_id",
106                                      model=Microlocation),)
107     schema = MicrolocationSchema
108     data_layer = {'session': db.session,
109                   'model': Microlocation,
110                   'methods': {'before_get_object': before_get_object}}
111 
112 
113 class MicrolocationRelationshipRequired(ResourceRelationship):
114     """
115     Microlocation Relationship for required entities
116     """
117     decorators = (api.has_permission('is_coorganizer', methods="PATCH", fetch="event_id", fetch_as="event_id",
118                                      model=Microlocation),)
119     methods = ['GET', 'PATCH']
120     schema = MicrolocationSchema
121     data_layer = {'session': db.session,
122                   'model': Microlocation}
123 
124 
125 class MicrolocationRelationshipOptional(ResourceRelationship):
126     """
127     Microlocation Relationship
128     """
129     decorators = (api.has_permission('is_coorganizer', methods="PATCH,DELETE", fetch="event_id", fetch_as="event_id",
130                                      model=Microlocation),)
131     schema = MicrolocationSchema
132     data_layer = {'session': db.session,
133                   'model': Microlocation}
134 [end of app/api/microlocations.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/app/api/microlocations.py b/app/api/microlocations.py --- a/app/api/microlocations.py +++ b/app/api/microlocations.py @@ -26,7 +26,7 @@ type_ = 'microlocation' self_view = 'v1.microlocation_detail' self_view_kwargs = {'id': '<id>'} - self_view_many = 'v1.session_list' + self_view_many = 'v1.microlocation_list_post' inflect = dasherize id = fields.Str(dump_only=True) @@ -36,6 +36,7 @@ floor = fields.Integer(allow_none=True) room = fields.Str(allow_none=True) sessions = Relationship(attribute='session', + many=True, self_view='v1.microlocation_session', self_view_kwargs={'id': '<id>'}, related_view='v1.session_list',
{"golden_diff": "diff --git a/app/api/microlocations.py b/app/api/microlocations.py\n--- a/app/api/microlocations.py\n+++ b/app/api/microlocations.py\n@@ -26,7 +26,7 @@\n type_ = 'microlocation'\n self_view = 'v1.microlocation_detail'\n self_view_kwargs = {'id': '<id>'}\n- self_view_many = 'v1.session_list'\n+ self_view_many = 'v1.microlocation_list_post'\n inflect = dasherize\n \n id = fields.Str(dump_only=True)\n@@ -36,6 +36,7 @@\n floor = fields.Integer(allow_none=True)\n room = fields.Str(allow_none=True)\n sessions = Relationship(attribute='session',\n+ many=True,\n self_view='v1.microlocation_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n", "issue": "Microlocations : GET requests return ERROR 500\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nEndpoint \r\n```\r\nGET v1/events/<event_id>/microlocations \r\n```\r\n\r\nResponse\r\n```\r\n{\r\n \"errors\":[\r\n {\r\n \"detail\":\"Unknown error\",\r\n \"source\":{\r\n \"pointer\":\"\"\r\n },\r\n \"status\":500,\r\n \"title\":\"Unknown error\"\r\n }\r\n ],\r\n \"jsonapi\":{\r\n \"version\":\"1.0\"\r\n }\r\n}\r\n```\r\n\r\nExample URL\r\n```\r\nhttps://open-event-api.herokuapp.com/v1/events/173/microlocations\r\n```\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.query import event_query\n\n\nclass MicrolocationSchema(Schema):\n \"\"\"\n Api schema for Microlocation Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Microlocation Api Schema\n \"\"\"\n type_ = 'microlocation'\n self_view = 'v1.microlocation_detail'\n self_view_kwargs = {'id': '<id>'}\n self_view_many = 'v1.session_list'\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n latitude = fields.Float(validate=lambda n: -90 <= n <= 90, allow_none=True)\n longitude = fields.Float(validate=lambda n: -180 <= n <= 180, allow_none=True)\n floor = fields.Integer(allow_none=True)\n room = fields.Str(allow_none=True)\n sessions = Relationship(attribute='session',\n self_view='v1.microlocation_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='SessionSchema',\n type_='session')\n event = Relationship(attribute='event',\n self_view='v1.microlocation_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass MicrolocationListPost(ResourceList):\n \"\"\"\n List and create microlocations\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise 
ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n methods = ['POST', ]\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationList(ResourceList):\n \"\"\"\n List Microlocations\n \"\"\"\n def query(self, view_kwargs):\n query_ = self.session.query(Microlocation)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n query_ = query_.join(Session).filter(Session.id == session.id)\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {\n 'query': query\n }}\n\n\nclass MicrolocationDetail(ResourceDetail):\n \"\"\"\n Microlocation detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n\n if view_kwargs.get('session_id') is not None:\n sessions = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if sessions.event_id is not None:\n view_kwargs['id'] = sessions.event_id\n else:\n view_kwargs['id'] = None\n\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {'before_get_object': before_get_object}}\n\n\nclass MicrolocationRelationshipRequired(ResourceRelationship):\n \"\"\"\n Microlocation Relationship for required entities\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n methods = ['GET', 'PATCH']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationRelationshipOptional(ResourceRelationship):\n \"\"\"\n Microlocation Relationship\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n", "path": "app/api/microlocations.py"}]}
2,046
199
gh_patches_debug_8814
rasdani/github-patches
git_diff
CTPUG__wafer-243
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tickets should be decoded on python 3
As seen from the recent quicket hook posts

TypeError at /tickets/quicket_hook/
the JSON object must be str, not 'bytes'

</issue>
<code>
[start of wafer/tickets/views.py]
1 import json
2 import logging
3 
4 from django.conf import settings
5 from django.contrib.auth import get_user_model
6 from django.core.exceptions import PermissionDenied, ValidationError
7 from django.core.urlresolvers import reverse
8 from django.http import HttpResponse, Http404
9 from django.views.decorators.csrf import csrf_exempt
10 from django.views.decorators.http import require_POST
11 from django.views.generic.edit import FormView
12 
13 from wafer.tickets.models import Ticket, TicketType
14 from wafer.tickets.forms import TicketForm
15 
16 log = logging.getLogger(__name__)
17 
18 
19 class ClaimView(FormView):
20     template_name = 'wafer.tickets/claim.html'
21     form_class = TicketForm
22 
23     def get_context_data(self, **kwargs):
24         context = super(ClaimView, self).get_context_data(**kwargs)
25         context['can_claim'] = self.can_claim()
26         return context
27 
28     def can_claim(self):
29         if settings.WAFER_REGISTRATION_MODE != 'ticket':
30             raise Http404('Ticket-based registration is not in use')
31         if not settings.WAFER_REGISTRATION_OPEN:
32             return False
33         return not self.request.user.userprofile.is_registered()
34 
35     def form_valid(self, form):
36         if not self.can_claim():
37             raise ValidationError('User may not claim a ticket')
38         ticket = Ticket.objects.get(barcode=form.cleaned_data['barcode'])
39         ticket.user = self.request.user
40         ticket.save()
41         return super(ClaimView, self).form_valid(form)
42 
43     def get_success_url(self):
44         return reverse(
45             'wafer_user_profile', args=(self.request.user.username,))
46 
47 
48 @csrf_exempt
49 @require_POST
50 def quicket_hook(request):
51     '''
52     Quicket.co.za can POST something like this when tickets are bought:
53     {
54      "reference": "REF00123456",
55      "event_id": 123,
56      "event_name": "My Big Event",
57      "amount": 0.00,
58      "email": "[email protected]",
59      "action": "checkout_started",
60      // Options are "checkout_started","checkout_cancelled","eft_pending",
61      // "checkout_completed"
62      "tickets": [
63       {
64        "id": 122,
65        "attendee_name": "",
66        "attendee_email": "",
67        "ticket_type": "Free Ticket",
68        "price": 0.00,
69        "barcode": 12345,
70       },
71       ...
72      ],
73     }
74     '''
75     if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:
76         raise PermissionDenied('Incorrect secret')
77 
78     payload = json.load(request)
79     for ticket in payload['tickets']:
80         import_ticket(ticket['barcode'], ticket['ticket_type'],
81                       ticket['attendee_email'])
82 
83     return HttpResponse("Noted\n", content_type='text/plain')
84 
85 
86 def import_ticket(ticket_barcode, ticket_type, email):
87     if Ticket.objects.filter(barcode=ticket_barcode).exists():
88         log.debug('Ticket already registered: %s', ticket_barcode)
89         return
90 
91     # truncate long ticket type names to length allowed by database
92     ticket_type = ticket_type[:TicketType.MAX_NAME_LENGTH]
93     type_, created = TicketType.objects.get_or_create(name=ticket_type)
94 
95     UserModel = get_user_model()
96 
97     try:
98         user = UserModel.objects.get(email=email, ticket=None)
99     except UserModel.DoesNotExist:
100         user = None
101     except UserModel.MultipleObjectsReturned:
102         # We're can't uniquely identify the user to associate this ticket
103         # with, so leave it for them to figure out via the 'claim ticket'
104         # interface
105         user = None
106 
107     ticket = Ticket.objects.create(
108         barcode=ticket_barcode,
109         email=email,
110         type=type_,
111         user=user,
112     )
113     ticket.save()
114 
115     if user:
116         log.info('Ticket registered: %s and linked to user', ticket)
117     else:
118         log.info('Ticket registered: %s. Unclaimed', ticket)
[end of wafer/tickets/views.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/wafer/tickets/views.py b/wafer/tickets/views.py --- a/wafer/tickets/views.py +++ b/wafer/tickets/views.py @@ -75,7 +75,8 @@ if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET: raise PermissionDenied('Incorrect secret') - payload = json.load(request) + # This is required for python 3, and in theory fine on python 2 + payload = json.loads(request.body.decode('utf8')) for ticket in payload['tickets']: import_ticket(ticket['barcode'], ticket['ticket_type'], ticket['attendee_email'])
{"golden_diff": "diff --git a/wafer/tickets/views.py b/wafer/tickets/views.py\n--- a/wafer/tickets/views.py\n+++ b/wafer/tickets/views.py\n@@ -75,7 +75,8 @@\n if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:\n raise PermissionDenied('Incorrect secret')\n \n- payload = json.load(request)\n+ # This is required for python 3, and in theory fine on python 2\n+ payload = json.loads(request.body.decode('utf8'))\n for ticket in payload['tickets']:\n import_ticket(ticket['barcode'], ticket['ticket_type'],\n ticket['attendee_email'])\n", "issue": "tickets should be decoded on python 3\nAs seen from the recent quicket hook posts\n\nTypeError at /tickets/quicket_hook/\nthe JSON object must be str, not 'bytes'\n\n", "before_files": [{"content": "import json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.edit import FormView\n\nfrom wafer.tickets.models import Ticket, TicketType\nfrom wafer.tickets.forms import TicketForm\n\nlog = logging.getLogger(__name__)\n\n\nclass ClaimView(FormView):\n template_name = 'wafer.tickets/claim.html'\n form_class = TicketForm\n\n def get_context_data(self, **kwargs):\n context = super(ClaimView, self).get_context_data(**kwargs)\n context['can_claim'] = self.can_claim()\n return context\n\n def can_claim(self):\n if settings.WAFER_REGISTRATION_MODE != 'ticket':\n raise Http404('Ticket-based registration is not in use')\n if not settings.WAFER_REGISTRATION_OPEN:\n return False\n return not self.request.user.userprofile.is_registered()\n\n def form_valid(self, form):\n if not self.can_claim():\n raise ValidationError('User may not claim a ticket')\n ticket = Ticket.objects.get(barcode=form.cleaned_data['barcode'])\n ticket.user = self.request.user\n ticket.save()\n return super(ClaimView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(\n 'wafer_user_profile', args=(self.request.user.username,))\n\n\n@csrf_exempt\n@require_POST\ndef quicket_hook(request):\n '''\n Quicket.co.za can POST something like this when tickets are bought:\n {\n \"reference\": \"REF00123456\",\n \"event_id\": 123,\n \"event_name\": \"My Big Event\",\n \"amount\": 0.00,\n \"email\": \"[email protected]\",\n \"action\": \"checkout_started\",\n // Options are \"checkout_started\",\"checkout_cancelled\",\"eft_pending\",\n // \"checkout_completed\"\n \"tickets\": [\n {\n \"id\": 122,\n \"attendee_name\": \"\",\n \"attendee_email\": \"\",\n \"ticket_type\": \"Free Ticket\",\n \"price\": 0.00,\n \"barcode\": 12345,\n },\n ...\n ],\n }\n '''\n if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:\n raise PermissionDenied('Incorrect secret')\n\n payload = json.load(request)\n for ticket in payload['tickets']:\n import_ticket(ticket['barcode'], ticket['ticket_type'],\n ticket['attendee_email'])\n\n return HttpResponse(\"Noted\\n\", content_type='text/plain')\n\n\ndef import_ticket(ticket_barcode, ticket_type, email):\n if Ticket.objects.filter(barcode=ticket_barcode).exists():\n log.debug('Ticket already registered: %s', ticket_barcode)\n return\n\n # truncate long ticket type names to length allowed by database\n ticket_type = ticket_type[:TicketType.MAX_NAME_LENGTH]\n type_, created = 
TicketType.objects.get_or_create(name=ticket_type)\n\n UserModel = get_user_model()\n\n try:\n user = UserModel.objects.get(email=email, ticket=None)\n except UserModel.DoesNotExist:\n user = None\n except UserModel.MultipleObjectsReturned:\n # We're can't uniquely identify the user to associate this ticket\n # with, so leave it for them to figure out via the 'claim ticket'\n # interface\n user = None\n\n ticket = Ticket.objects.create(\n barcode=ticket_barcode,\n email=email,\n type=type_,\n user=user,\n )\n ticket.save()\n\n if user:\n log.info('Ticket registered: %s and linked to user', ticket)\n else:\n log.info('Ticket registered: %s. Unclaimed', ticket)\n", "path": "wafer/tickets/views.py"}]}
1,668
147
gh_patches_debug_3467
rasdani/github-patches
git_diff
getmoto__moto-1739
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [SES] Does not properly verify mailbox with display name
https://tools.ietf.org/html/rfc2822.html#section-3.4 defines two forms of valid mailbox:

* `[email protected]`
* `"Foo Bar" <[email protected]>`

SES supports both of these forms. Per https://github.com/spulec/moto/blob/master/moto/ses/models.py#L55, only the first form is supported by moto.
</issue>
<code>
[start of moto/ses/models.py]
1 from __future__ import unicode_literals
2 
3 import email
4 from email.utils import parseaddr
5 
6 from moto.core import BaseBackend, BaseModel
7 from .exceptions import MessageRejectedError
8 from .utils import get_random_message_id
9 
10 
11 RECIPIENT_LIMIT = 50
12 
13 
14 class Message(BaseModel):
15 
16     def __init__(self, message_id, source, subject, body, destinations):
17         self.id = message_id
18         self.source = source
19         self.subject = subject
20         self.body = body
21         self.destinations = destinations
22 
23 
24 class RawMessage(BaseModel):
25 
26     def __init__(self, message_id, source, destinations, raw_data):
27         self.id = message_id
28         self.source = source
29         self.destinations = destinations
30         self.raw_data = raw_data
31 
32 
33 class SESQuota(BaseModel):
34 
35     def __init__(self, sent):
36         self.sent = sent
37 
38     @property
39     def sent_past_24(self):
40         return self.sent
41 
42 
43 class SESBackend(BaseBackend):
44 
45     def __init__(self):
46         self.addresses = []
47         self.email_addresses = []
48         self.domains = []
49         self.sent_messages = []
50         self.sent_message_count = 0
51 
52     def _is_verified_address(self, address):
53         if address in self.addresses:
54             return True
55         user, host = address.split('@', 1)
56         return host in self.domains
57 
58     def verify_email_identity(self, address):
59         self.addresses.append(address)
60 
61     def verify_email_address(self, address):
62         self.email_addresses.append(address)
63 
64     def verify_domain(self, domain):
65         self.domains.append(domain)
66 
67     def list_identities(self):
68         return self.domains + self.addresses
69 
70     def list_verified_email_addresses(self):
71         return self.email_addresses
72 
73     def delete_identity(self, identity):
74         if '@' in identity:
75             self.addresses.remove(identity)
76         else:
77             self.domains.remove(identity)
78 
79     def send_email(self, source, subject, body, destinations):
80         recipient_count = sum(map(len, destinations.values()))
81         if recipient_count > RECIPIENT_LIMIT:
82             raise MessageRejectedError('Too many recipients.')
83         if not self._is_verified_address(source):
84             raise MessageRejectedError(
85                 "Email address not verified %s" % source
86             )
87 
88         message_id = get_random_message_id()
89         message = Message(message_id, source, subject, body, destinations)
90         self.sent_messages.append(message)
91         self.sent_message_count += recipient_count
92         return message
93 
94     def send_raw_email(self, source, destinations, raw_data):
95         if source is not None:
96             _, source_email_address = parseaddr(source)
97             if source_email_address not in self.addresses:
98                 raise MessageRejectedError(
99                     "Did not have authority to send from email %s" % source_email_address
100                 )
101 
102         recipient_count = len(destinations)
103         message = email.message_from_string(raw_data)
104         if source is None:
105             if message['from'] is None:
106                 raise MessageRejectedError(
107                     "Source not specified"
108                 )
109 
110             _, source_email_address = parseaddr(message['from'])
111             if source_email_address not in self.addresses:
112                 raise MessageRejectedError(
113                     "Did not have authority to send from email %s" % source_email_address
114                 )
115 
116         for header in 'TO', 'CC', 'BCC':
117             recipient_count += sum(
118                 d.strip() and 1 or 0
119                 for d in message.get(header, '').split(',')
120             )
121         if recipient_count > RECIPIENT_LIMIT:
122             raise MessageRejectedError('Too many recipients.')
123 
124         self.sent_message_count += recipient_count
125         message_id = get_random_message_id()
126         message = RawMessage(message_id, source, destinations, raw_data)
127         self.sent_messages.append(message)
128         return message
129 
130     def get_send_quota(self):
131         return SESQuota(self.sent_message_count)
132 
133 
134 ses_backend = SESBackend()
[end of moto/ses/models.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/moto/ses/models.py b/moto/ses/models.py --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -49,7 +49,8 @@ self.sent_messages = [] self.sent_message_count = 0 - def _is_verified_address(self, address): + def _is_verified_address(self, source): + _, address = parseaddr(source) if address in self.addresses: return True user, host = address.split('@', 1)
{"golden_diff": "diff --git a/moto/ses/models.py b/moto/ses/models.py\n--- a/moto/ses/models.py\n+++ b/moto/ses/models.py\n@@ -49,7 +49,8 @@\n self.sent_messages = []\n self.sent_message_count = 0\n \n- def _is_verified_address(self, address):\n+ def _is_verified_address(self, source):\n+ _, address = parseaddr(source)\n if address in self.addresses:\n return True\n user, host = address.split('@', 1)\n", "issue": "[SES] Does not properly verify mailbox with display name\nhttps://tools.ietf.org/html/rfc2822.html#section-3.4 defines two forms of valid mailbox:\r\n\r\n* `[email protected]`\r\n* `\"Foo Bar\" <[email protected]>`\r\n\r\nSES supports both of these forms. Per https://github.com/spulec/moto/blob/master/moto/ses/models.py#L55, only the first form is supported by moto.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport email\nfrom email.utils import parseaddr\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import MessageRejectedError\nfrom .utils import get_random_message_id\n\n\nRECIPIENT_LIMIT = 50\n\n\nclass Message(BaseModel):\n\n def __init__(self, message_id, source, subject, body, destinations):\n self.id = message_id\n self.source = source\n self.subject = subject\n self.body = body\n self.destinations = destinations\n\n\nclass RawMessage(BaseModel):\n\n def __init__(self, message_id, source, destinations, raw_data):\n self.id = message_id\n self.source = source\n self.destinations = destinations\n self.raw_data = raw_data\n\n\nclass SESQuota(BaseModel):\n\n def __init__(self, sent):\n self.sent = sent\n\n @property\n def sent_past_24(self):\n return self.sent\n\n\nclass SESBackend(BaseBackend):\n\n def __init__(self):\n self.addresses = []\n self.email_addresses = []\n self.domains = []\n self.sent_messages = []\n self.sent_message_count = 0\n\n def _is_verified_address(self, address):\n if address in self.addresses:\n return True\n user, host = address.split('@', 1)\n return host in self.domains\n\n def verify_email_identity(self, address):\n self.addresses.append(address)\n\n def verify_email_address(self, address):\n self.email_addresses.append(address)\n\n def verify_domain(self, domain):\n self.domains.append(domain)\n\n def list_identities(self):\n return self.domains + self.addresses\n\n def list_verified_email_addresses(self):\n return self.email_addresses\n\n def delete_identity(self, identity):\n if '@' in identity:\n self.addresses.remove(identity)\n else:\n self.domains.remove(identity)\n\n def send_email(self, source, subject, body, destinations):\n recipient_count = sum(map(len, destinations.values()))\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n if not self._is_verified_address(source):\n raise MessageRejectedError(\n \"Email address not verified %s\" % source\n )\n\n message_id = get_random_message_id()\n message = Message(message_id, source, subject, body, destinations)\n self.sent_messages.append(message)\n self.sent_message_count += recipient_count\n return message\n\n def send_raw_email(self, source, destinations, raw_data):\n if source is not None:\n _, source_email_address = parseaddr(source)\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n recipient_count = len(destinations)\n message = email.message_from_string(raw_data)\n if source is None:\n if message['from'] is None:\n raise MessageRejectedError(\n \"Source not specified\"\n )\n\n _, 
source_email_address = parseaddr(message['from'])\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n for header in 'TO', 'CC', 'BCC':\n recipient_count += sum(\n d.strip() and 1 or 0\n for d in message.get(header, '').split(',')\n )\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n\n self.sent_message_count += recipient_count\n message_id = get_random_message_id()\n message = RawMessage(message_id, source, destinations, raw_data)\n self.sent_messages.append(message)\n return message\n\n def get_send_quota(self):\n return SESQuota(self.sent_message_count)\n\n\nses_backend = SESBackend()\n", "path": "moto/ses/models.py"}]}
1,775
119
gh_patches_debug_1786
rasdani/github-patches
git_diff
mozilla__telemetry-analysis-service-413
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ImportError: No module named 'atmo.clusters.jobs' ``` app@a898b116953a:~$ ./manage.py update_clusters Traceback (most recent call last): File "./manage.py", line 11, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 195, in fetch_command klass = load_command_class(app_name, subcommand) File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 39, in load_command_class module = import_module('%s.management.commands.%s' % (app_name, name)) File "/usr/local/lib/python3.5/importlib/__init__.py", line 126, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 986, in _gcd_import File "<frozen importlib._bootstrap>", line 969, in _find_and_load File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 673, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 673, in exec_module File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed File "/app/atmo/clusters/management/commands/update_clusters.py", line 6, in <module> from ...jobs import update_clusters ImportError: No module named 'atmo.clusters.jobs' ``` </issue> <code> [start of atmo/clusters/management/commands/update_clusters.py] 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 from django.core.management.base import BaseCommand 5 6 from ...jobs import update_clusters 7 8 9 class Command(BaseCommand): 10 help = 'Go through active clusters and update their status' 11 12 def handle(self, *args, **options): 13 self.stdout.write('Updating cluster info...', ending='') 14 update_clusters() 15 self.stdout.write('done.') 16 [end of atmo/clusters/management/commands/update_clusters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/atmo/clusters/management/commands/update_clusters.py b/atmo/clusters/management/commands/update_clusters.py --- a/atmo/clusters/management/commands/update_clusters.py +++ b/atmo/clusters/management/commands/update_clusters.py @@ -3,7 +3,7 @@ # file, you can obtain one at http://mozilla.org/MPL/2.0/. from django.core.management.base import BaseCommand -from ...jobs import update_clusters +from ...tasks import update_clusters class Command(BaseCommand):
{"golden_diff": "diff --git a/atmo/clusters/management/commands/update_clusters.py b/atmo/clusters/management/commands/update_clusters.py\n--- a/atmo/clusters/management/commands/update_clusters.py\n+++ b/atmo/clusters/management/commands/update_clusters.py\n@@ -3,7 +3,7 @@\n # file, you can obtain one at http://mozilla.org/MPL/2.0/.\n from django.core.management.base import BaseCommand\n \n-from ...jobs import update_clusters\n+from ...tasks import update_clusters\n \n \n class Command(BaseCommand):\n", "issue": "ImportError: No module named 'atmo.clusters.jobs'\n```\r\napp@a898b116953a:~$ ./manage.py update_clusters\r\nTraceback (most recent call last):\r\n File \"./manage.py\", line 11, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 353, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 345, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 195, in fetch_command\r\n klass = load_command_class(app_name, subcommand)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 39, in load_command_class\r\n module = import_module('%s.management.commands.%s' % (app_name, name))\r\n File \"/usr/local/lib/python3.5/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 986, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 969, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 958, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 673, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 673, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 222, in _call_with_frames_removed\r\n File \"/app/atmo/clusters/management/commands/update_clusters.py\", line 6, in <module>\r\n from ...jobs import update_clusters\r\nImportError: No module named 'atmo.clusters.jobs'\r\n```\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.core.management.base import BaseCommand\n\nfrom ...jobs import update_clusters\n\n\nclass Command(BaseCommand):\n help = 'Go through active clusters and update their status'\n\n def handle(self, *args, **options):\n self.stdout.write('Updating cluster info...', ending='')\n update_clusters()\n self.stdout.write('done.')\n", "path": "atmo/clusters/management/commands/update_clusters.py"}]}
1,163
115
gh_patches_debug_39196
rasdani/github-patches
git_diff
liqd__a4-meinberlin-3705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> testing 4295: 402 error for poll export
**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24/poll/export/
**user:** initiator, moderator, group member
**expected behaviour:** download export
**behaviour:** 403 error
**important screensize:**
**device & browser:** big sur, firefox
**Comment/Question:** 

Screenshot?

</issue>
<code>
[start of meinberlin/apps/polls/exports.py]
1 from django.utils.translation import ugettext as _
2 from rules.contrib.views import PermissionRequiredMixin
3 
4 from adhocracy4.comments.models import Comment
5 from adhocracy4.exports import mixins
6 from adhocracy4.exports import views as export_views
7 from adhocracy4.polls import models as poll_models
8 from meinberlin.apps.users.models import User
9 
10 
11 class PollCommentExportView(
12         PermissionRequiredMixin,
13         mixins.ExportModelFieldsMixin,
14         mixins.UserGeneratedContentExportMixin,
15         mixins.ItemExportWithLinkMixin,
16         mixins.CommentExportWithRepliesToMixin,
17         export_views.BaseItemExportView
18 ):
19 
20     model = Comment
21 
22     fields = ['id', 'comment', 'created']
23     permission_required = 'a4projects.change_project'
24 
25     def get_permission_object(self):
26         return self.module.project
27 
28     def get_queryset(self):
29         comments = (
30             Comment.objects.filter(poll__module=self.module) |
31             Comment.objects.filter(parent_comment__poll__module=self.module)
32         )
33         return comments
34 
35     def get_virtual_fields(self, virtual):
36         virtual.setdefault('id', _('ID'))
37         virtual.setdefault('comment', _('Comment'))
38         virtual.setdefault('created', _('Created'))
39         return super().get_virtual_fields(virtual)
40 
41     @property
42     def raise_exception(self):
43         return self.request.user.is_authenticated
44 
45 
46 class PollExportView(
47         PermissionRequiredMixin,
48         export_views.BaseItemExportView
49 ):
50 
51     permission_required = 'a4projects.change_project'
52 
53     def get_queryset(self):
54         creators_vote = poll_models.Vote.objects.filter(
55             choice__question__poll=self.poll).values_list('creator', flat=True)
56         creators_answer = poll_models.Answer.objects.filter(
57             question__poll=self.poll).values_list('creator', flat=True)
58         creator_ids = list(set(creators_vote).union(set(creators_answer)))
59         return User.objects.filter(pk__in=creator_ids)
60 
61     @property
62     def poll(self):
63         return poll_models.Poll.objects.get(module=self.module)
64 
65     @property
66     def single_choice_questions(self):
67         return self.poll.questions.filter(
68             multiple_choice=False,
69             is_open=False).order_by('id')
70 
71     @property
72     def multiple_choice_questions(self):
73         return self.poll.questions.filter(multiple_choice=True).order_by('id')
74 
75     @property
76     def open_questions(self):
77         return self.poll.questions.filter(is_open=True).order_by('id')
78 
79     def get_virtual_fields(self, virtual):
80         virtual = super().get_virtual_fields(virtual)
81         virtual = self.get_virtual_fields_choice_questions(
82             virtual, self.single_choice_questions)
83         virtual = self.get_virtual_fields_choice_questions(
84             virtual, self.multiple_choice_questions)
85         virtual = self.get_virtual_fields_open_questions(
86             virtual, self.open_questions)
87 
88         return virtual
89 
90     def get_virtual_fields_choice_questions(self, virtual, choice_questions):
91         for question in choice_questions.all():
92             for choice in question.choices.all():
93                 identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)
94                 virtual[(choice, False)] = identifier
95                 if choice.is_other_choice:
96                     identifier_answer = identifier + '_text'
97                     virtual[(choice, True)] = identifier_answer
98 
99         return virtual
100 
101     def get_virtual_fields_open_questions(self, virtual, open_questions):
102         for question in open_questions.all():
103             identifier = 'Q' + str(question.pk)
104             virtual[(question, False)] = identifier
105             identifier_answer = identifier + '_text'
106             virtual[(question, True)] = identifier_answer
107 
108         return virtual
109 
110     def get_field_data(self, user, field):
111         field_object, is_text_field = field
112 
113         if type(field_object) == poll_models.Choice:
114             votes_qs = poll_models.Vote.objects.filter(
115                 choice=field_object,
116                 creator=user)
117             if not is_text_field:
118                 value = int(votes_qs.exists())
119             else:
120                 vote = votes_qs.first()
121                 if vote:
122                     value = poll_models.OtherVote.objects.get(vote=vote).answer
123                 else:
124                     value = ''
125         else:  # field_object is question
126             answers_qs = poll_models.Answer.objects.filter(
127                 question=field_object,
128                 creator=user)
129             if not is_text_field:
130                 value = int(answers_qs.exists())
131             else:
132                 answer = answers_qs.first()
133                 if answer:
134                     value = answer.answer
135                 else:
136                     value = ''
137 
138         return value
[end of meinberlin/apps/polls/exports.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/meinberlin/apps/polls/exports.py b/meinberlin/apps/polls/exports.py --- a/meinberlin/apps/polls/exports.py +++ b/meinberlin/apps/polls/exports.py @@ -50,6 +50,9 @@ permission_required = 'a4projects.change_project' + def get_permission_object(self): + return self.module.project + def get_queryset(self): creators_vote = poll_models.Vote.objects.filter( choice__question__poll=self.poll).values_list('creator', flat=True) @@ -63,47 +66,37 @@ return poll_models.Poll.objects.get(module=self.module) @property - def single_choice_questions(self): - return self.poll.questions.filter( - multiple_choice=False, - is_open=False).order_by('id') - - @property - def multiple_choice_questions(self): - return self.poll.questions.filter(multiple_choice=True).order_by('id') - - @property - def open_questions(self): - return self.poll.questions.filter(is_open=True).order_by('id') + def questions(self): + return self.poll.questions.all() def get_virtual_fields(self, virtual): virtual = super().get_virtual_fields(virtual) - virtual = self.get_virtual_fields_choice_questions( - virtual, self.single_choice_questions) - virtual = self.get_virtual_fields_choice_questions( - virtual, self.multiple_choice_questions) - virtual = self.get_virtual_fields_open_questions( - virtual, self.open_questions) + + for question in self.questions: + if question.is_open: + virtual = \ + self.get_virtual_field_open_question(virtual, question) + else: + virtual = \ + self.get_virtual_field_choice_question(virtual, question) return virtual - def get_virtual_fields_choice_questions(self, virtual, choice_questions): - for question in choice_questions.all(): - for choice in question.choices.all(): - identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk) - virtual[(choice, False)] = identifier - if choice.is_other_choice: - identifier_answer = identifier + '_text' - virtual[(choice, True)] = identifier_answer + def get_virtual_field_choice_question(self, virtual, choice_question): + for choice in choice_question.choices.all(): + identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk) + virtual[(choice, False)] = identifier + if choice.is_other_choice: + identifier_answer = identifier + '_text' + virtual[(choice, True)] = identifier_answer return virtual - def get_virtual_fields_open_questions(self, virtual, open_questions): - for question in open_questions.all(): - identifier = 'Q' + str(question.pk) - virtual[(question, False)] = identifier - identifier_answer = identifier + '_text' - virtual[(question, True)] = identifier_answer + def get_virtual_field_open_question(self, virtual, open_question): + identifier = 'Q' + str(open_question.pk) + virtual[(open_question, False)] = identifier + identifier_answer = identifier + '_text' + virtual[(open_question, True)] = identifier_answer return virtual
{"golden_diff": "diff --git a/meinberlin/apps/polls/exports.py b/meinberlin/apps/polls/exports.py\n--- a/meinberlin/apps/polls/exports.py\n+++ b/meinberlin/apps/polls/exports.py\n@@ -50,6 +50,9 @@\n \n permission_required = 'a4projects.change_project'\n \n+ def get_permission_object(self):\n+ return self.module.project\n+\n def get_queryset(self):\n creators_vote = poll_models.Vote.objects.filter(\n choice__question__poll=self.poll).values_list('creator', flat=True)\n@@ -63,47 +66,37 @@\n return poll_models.Poll.objects.get(module=self.module)\n \n @property\n- def single_choice_questions(self):\n- return self.poll.questions.filter(\n- multiple_choice=False,\n- is_open=False).order_by('id')\n-\n- @property\n- def multiple_choice_questions(self):\n- return self.poll.questions.filter(multiple_choice=True).order_by('id')\n-\n- @property\n- def open_questions(self):\n- return self.poll.questions.filter(is_open=True).order_by('id')\n+ def questions(self):\n+ return self.poll.questions.all()\n \n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n- virtual = self.get_virtual_fields_choice_questions(\n- virtual, self.single_choice_questions)\n- virtual = self.get_virtual_fields_choice_questions(\n- virtual, self.multiple_choice_questions)\n- virtual = self.get_virtual_fields_open_questions(\n- virtual, self.open_questions)\n+\n+ for question in self.questions:\n+ if question.is_open:\n+ virtual = \\\n+ self.get_virtual_field_open_question(virtual, question)\n+ else:\n+ virtual = \\\n+ self.get_virtual_field_choice_question(virtual, question)\n \n return virtual\n \n- def get_virtual_fields_choice_questions(self, virtual, choice_questions):\n- for question in choice_questions.all():\n- for choice in question.choices.all():\n- identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)\n- virtual[(choice, False)] = identifier\n- if choice.is_other_choice:\n- identifier_answer = identifier + '_text'\n- virtual[(choice, True)] = identifier_answer\n+ def get_virtual_field_choice_question(self, virtual, choice_question):\n+ for choice in choice_question.choices.all():\n+ identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk)\n+ virtual[(choice, False)] = identifier\n+ if choice.is_other_choice:\n+ identifier_answer = identifier + '_text'\n+ virtual[(choice, True)] = identifier_answer\n \n return virtual\n \n- def get_virtual_fields_open_questions(self, virtual, open_questions):\n- for question in open_questions.all():\n- identifier = 'Q' + str(question.pk)\n- virtual[(question, False)] = identifier\n- identifier_answer = identifier + '_text'\n- virtual[(question, True)] = identifier_answer\n+ def get_virtual_field_open_question(self, virtual, open_question):\n+ identifier = 'Q' + str(open_question.pk)\n+ virtual[(open_question, False)] = identifier\n+ identifier_answer = identifier + '_text'\n+ virtual[(open_question, True)] = identifier_answer\n \n return virtual\n", "issue": "testing 4295: 402 error for poll export\n**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24/poll/export/\r\n**user:** initiator, moderator, group member\r\n**expected behaviour:** download export\r\n**behaviour:** 403 error\r\n**important screensize:**\r\n**device & browser:** big sur, firefox\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext as _\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.exports import 
mixins\nfrom adhocracy4.exports import views as export_views\nfrom adhocracy4.polls import models as poll_models\nfrom meinberlin.apps.users.models import User\n\n\nclass PollCommentExportView(\n PermissionRequiredMixin,\n mixins.ExportModelFieldsMixin,\n mixins.UserGeneratedContentExportMixin,\n mixins.ItemExportWithLinkMixin,\n mixins.CommentExportWithRepliesToMixin,\n export_views.BaseItemExportView\n):\n\n model = Comment\n\n fields = ['id', 'comment', 'created']\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.module.project\n\n def get_queryset(self):\n comments = (\n Comment.objects.filter(poll__module=self.module) |\n Comment.objects.filter(parent_comment__poll__module=self.module)\n )\n return comments\n\n def get_virtual_fields(self, virtual):\n virtual.setdefault('id', _('ID'))\n virtual.setdefault('comment', _('Comment'))\n virtual.setdefault('created', _('Created'))\n return super().get_virtual_fields(virtual)\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n\nclass PollExportView(\n PermissionRequiredMixin,\n export_views.BaseItemExportView\n):\n\n permission_required = 'a4projects.change_project'\n\n def get_queryset(self):\n creators_vote = poll_models.Vote.objects.filter(\n choice__question__poll=self.poll).values_list('creator', flat=True)\n creators_answer = poll_models.Answer.objects.filter(\n question__poll=self.poll).values_list('creator', flat=True)\n creator_ids = list(set(creators_vote).union(set(creators_answer)))\n return User.objects.filter(pk__in=creator_ids)\n\n @property\n def poll(self):\n return poll_models.Poll.objects.get(module=self.module)\n\n @property\n def single_choice_questions(self):\n return self.poll.questions.filter(\n multiple_choice=False,\n is_open=False).order_by('id')\n\n @property\n def multiple_choice_questions(self):\n return self.poll.questions.filter(multiple_choice=True).order_by('id')\n\n @property\n def open_questions(self):\n return self.poll.questions.filter(is_open=True).order_by('id')\n\n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n virtual = self.get_virtual_fields_choice_questions(\n virtual, self.single_choice_questions)\n virtual = self.get_virtual_fields_choice_questions(\n virtual, self.multiple_choice_questions)\n virtual = self.get_virtual_fields_open_questions(\n virtual, self.open_questions)\n\n return virtual\n\n def get_virtual_fields_choice_questions(self, virtual, choice_questions):\n for question in choice_questions.all():\n for choice in question.choices.all():\n identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)\n virtual[(choice, False)] = identifier\n if choice.is_other_choice:\n identifier_answer = identifier + '_text'\n virtual[(choice, True)] = identifier_answer\n\n return virtual\n\n def get_virtual_fields_open_questions(self, virtual, open_questions):\n for question in open_questions.all():\n identifier = 'Q' + str(question.pk)\n virtual[(question, False)] = identifier\n identifier_answer = identifier + '_text'\n virtual[(question, True)] = identifier_answer\n\n return virtual\n\n def get_field_data(self, user, field):\n field_object, is_text_field = field\n\n if type(field_object) == poll_models.Choice:\n votes_qs = poll_models.Vote.objects.filter(\n choice=field_object,\n creator=user)\n if not is_text_field:\n value = int(votes_qs.exists())\n else:\n vote = votes_qs.first()\n if vote:\n value = poll_models.OtherVote.objects.get(vote=vote).answer\n else:\n 
value = ''\n else: # field_object is question\n answers_qs = poll_models.Answer.objects.filter(\n question=field_object,\n creator=user)\n if not is_text_field:\n value = int(answers_qs.exists())\n else:\n answer = answers_qs.first()\n if answer:\n value = answer.answer\n else:\n value = ''\n\n return value\n", "path": "meinberlin/apps/polls/exports.py"}]}
1,888
731
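A minimal sketch of the permission fix captured in the record above, assuming django-rules' `PermissionRequiredMixin` (the class and constructor here are illustrative stand-ins, not code from the record): the mixin checks `permission_required` against whatever `get_permission_object()` returns, so pointing it at the module's project is what turns the 403 into a successful export for initiators, moderators, and group members.

```python
from rules.contrib.views import PermissionRequiredMixin


class ExamplePollExportView(PermissionRequiredMixin):
    """Illustrative only: the real views also inherit an export base view."""

    permission_required = 'a4projects.change_project'

    def __init__(self, module):
        self.module = module

    def get_permission_object(self):
        # django-rules evaluates `permission_required` against this object.
        # Without the override, the mixin falls back to a default (or no)
        # object, the 'change_project' check fails, and the export returns 403.
        return self.module.project
```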
gh_patches_debug_52919
rasdani/github-patches
git_diff
great-expectations__great_expectations-3469
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` </issue> <code> [start of great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py] 1 import logging 2 from functools import reduce 3 4 from great_expectations.execution_engine import ( 5 PandasExecutionEngine, 6 SparkDFExecutionEngine, 7 SqlAlchemyExecutionEngine, 8 ) 9 from great_expectations.expectations.metrics.import_manager import F, sa 10 from great_expectations.expectations.metrics.map_metric_provider import ( 11 MulticolumnMapMetricProvider, 12 multicolumn_condition_partial, 13 ) 14 15 logger = logging.getLogger(__name__) 16 17 18 class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider): 19 condition_metric_name = "select_column_values.unique.within_record" 20 condition_domain_keys = ( 21 "batch_id", 22 "table", 23 "column_list", 24 "row_condition", 25 "condition_parser", 26 "ignore_row_if", 27 ) 28 29 @multicolumn_condition_partial(engine=PandasExecutionEngine) 30 def _pandas(cls, column_list, **kwargs): 31 num_columns = len(column_list.columns) 32 row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns 33 return row_wise_cond 34 35 @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine) 36 def _sqlalchemy(cls, column_list, **kwargs): 37 """ 38 The present approach relies on an inefficient query condition construction implementation, whose computational 39 cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is 40 available, this is the only feasible mechanism under the current architecture, where map metric providers must 41 return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios). 42 """ 43 num_columns = len(column_list) 44 45 # An arbitrary "num_columns" value used for issuing an explanatory message as a warning. 46 if num_columns > 100: 47 logger.warning( 48 f"""Batch data with {num_columns} columns is detected. Computing the "{cls.condition_metric_name}" \ 49 metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process. 50 """ 51 ) 52 53 conditions = sa.or_( 54 *( 55 sa.or_( 56 column_list[idx_src] == column_list[idx_dest], 57 sa.and_( 58 column_list[idx_src] == None, column_list[idx_dest] == None 59 ), 60 ) 61 for idx_src in range(num_columns - 1) 62 for idx_dest in range(idx_src + 1, num_columns) 63 ) 64 ) 65 row_wise_cond = sa.not_(sa.or_(conditions)) 66 return row_wise_cond 67 68 @multicolumn_condition_partial(engine=SparkDFExecutionEngine) 69 def _spark(cls, column_list, **kwargs): 70 column_names = column_list.columns 71 num_columns = len(column_names) 72 73 conditions = [] 74 for idx_src in range(num_columns - 1): 75 for idx_dest in range(idx_src + 1, num_columns): 76 conditions.append( 77 F.col(column_names[idx_src]).eqNullSafe( 78 F.col(column_names[idx_dest]) 79 ) 80 ) 81 82 row_wise_cond = ~reduce(lambda a, b: a | b, conditions) 83 return row_wise_cond 84 [end of great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py --- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py +++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py @@ -62,7 +62,7 @@ for idx_dest in range(idx_src + 1, num_columns) ) ) - row_wise_cond = sa.not_(sa.or_(conditions)) + row_wise_cond = sa.not_(conditions) return row_wise_cond @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n@@ -62,7 +62,7 @@\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n- row_wise_cond = sa.not_(sa.or_(conditions))\n+ row_wise_cond = sa.not_(conditions)\n return row_wise_cond\n \n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom functools import reduce\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. 
Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n conditions = sa.or_(\n *(\n sa.or_(\n column_list[idx_src] == column_list[idx_dest],\n sa.and_(\n column_list[idx_src] == None, column_list[idx_dest] == None\n ),\n )\n for idx_src in range(num_columns - 1)\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n row_wise_cond = sa.not_(sa.or_(conditions))\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column_list, **kwargs):\n column_names = column_list.columns\n num_columns = len(column_names)\n\n conditions = []\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n conditions.append(\n F.col(column_names[idx_src]).eqNullSafe(\n F.col(column_names[idx_dest])\n )\n )\n\n row_wise_cond = ~reduce(lambda a, b: a | b, conditions)\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}]}
1,415
180
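Two things in the record above are easy to miss. First, the quoted issue concerns Python 2 division semantics; a minimal, self-contained illustration of the preferred fix (the function and values are invented for the example):

```python
from __future__ import division  # a no-op on Python 3; true division on Python 2


def duplicate_fraction(duplicates, total_rows):
    # Under legacy Python 2 semantics, 3 / 4 == 0. With the future import
    # (or the older `1. * x / y` idiom the issue discourages), it is 0.75.
    return duplicates / total_rows


print(duplicate_fraction(3, 4))  # 0.75
```

Second, the golden diff itself is a simplification rather than a behavior change: `conditions` is already constructed as `sa.or_(*(...))`, so wrapping it in another `sa.or_` before negating with `sa.not_` was redundant.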
gh_patches_debug_43188
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-2357
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Axes zoom area does not resize in 0.12.4 ### Short description When a plot is first generated with a given size, say `width ` and `height` in pixels, the entire axes areas on the left and bottom of the plot are zoomable. When the plot size is resized such that the plot is larger (e.g. the window is grabbed along an edge or corner and expanded), only the top `height` portion of the y-axes or the left `width` portion of the x-axis retain the ability to zoom the plot using the scroll wheel. The axes area outside (i.e. the lower portion of the y-axis or the right portion of the x-axis) are not zoomable. If hovering over the plot, not on an axes, there is no issue with zooming. Reverting to 0.12.3 fixes issue. I was able to reproduce this in both custom plots and many of the pyqtgraph.example scripts. ### Tested environment(s) * PyQtGraph version: 0.12.4 * Qt Python binding: PySide2 5.15.2.1 Qt 5.15.2 * Python version: 3.7 * NumPy version: 1.21.6 * Operating system: Windows 10 Enterprise 21H2 * Installation method: pip </issue> <code> [start of pyqtgraph/graphicsItems/GraphicsWidget.py] 1 from ..Qt import QtGui, QtWidgets 2 from .GraphicsItem import GraphicsItem 3 4 __all__ = ['GraphicsWidget'] 5 6 class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget): 7 8 _qtBaseClass = QtWidgets.QGraphicsWidget 9 def __init__(self, *args, **kargs): 10 """ 11 **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget` 12 13 Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. 14 Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`. 15 """ 16 QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs) 17 GraphicsItem.__init__(self) 18 19 # cache bouding rect and geometry 20 self._boundingRectCache = self._previousGeometry = None 21 self._painterPathCache = None 22 23 ## done by GraphicsItem init 24 #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items() 25 26 # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86 27 #def itemChange(self, change, value): 28 ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing! 29 ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here 30 ## The default behavior is just to return the value argument, so we'll do that 31 ## without calling the original method. 32 #ret = value 33 #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]: 34 #self._updateView() 35 #return ret 36 37 def setFixedHeight(self, h): 38 self.setMaximumHeight(h) 39 self.setMinimumHeight(h) 40 41 def setFixedWidth(self, h): 42 self.setMaximumWidth(h) 43 self.setMinimumWidth(h) 44 45 def height(self): 46 return self.geometry().height() 47 48 def width(self): 49 return self.geometry().width() 50 51 def boundingRect(self): 52 geometry = self.geometry() 53 if geometry != self._previousGeometry: 54 self._painterPathCache = None 55 56 br = self.mapRectFromParent(geometry).normalized() 57 self._boundingRectCache = br 58 self._previousGeometry = geometry 59 else: 60 br = self._boundingRectCache 61 62 return br 63 64 def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise. 
65 p = self._painterPathCache 66 if p is None: 67 self._painterPathCache = p = QtGui.QPainterPath() 68 p.addRect(self.boundingRect()) 69 70 return p 71 [end of pyqtgraph/graphicsItems/GraphicsWidget.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyqtgraph/graphicsItems/GraphicsWidget.py b/pyqtgraph/graphicsItems/GraphicsWidget.py --- a/pyqtgraph/graphicsItems/GraphicsWidget.py +++ b/pyqtgraph/graphicsItems/GraphicsWidget.py @@ -3,36 +3,43 @@ __all__ = ['GraphicsWidget'] + class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget): _qtBaseClass = QtWidgets.QGraphicsWidget - def __init__(self, *args, **kargs): + + def __init__(self, *args, **kwargs): """ **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget` Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`. """ - QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs) + QtWidgets.QGraphicsWidget.__init__(self, *args, **kwargs) GraphicsItem.__init__(self) - # cache bouding rect and geometry + # cache bounding rect and geometry self._boundingRectCache = self._previousGeometry = None self._painterPathCache = None - - ## done by GraphicsItem init - #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items() + self.geometryChanged.connect(self._resetCachedProperties) + + # done by GraphicsItem init + # GraphicsScene.registerObject(self) # workaround for pyqt bug in GraphicsScene.items() # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86 - #def itemChange(self, change, value): - ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing! - ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here - ## The default behavior is just to return the value argument, so we'll do that - ## without calling the original method. - #ret = value - #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]: - #self._updateView() - #return ret + # def itemChange(self, change, value): + # # BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing! + # # ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) # segv occurs here + # # The default behavior is just to return the value argument, so we'll do that + # # without calling the original method. + # ret = value + # if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]: + # self._updateView() + # return ret + + def _resetCachedProperties(self): + self._boundingRectCache = self._previousGeometry = None + self._painterPathCache = None def setFixedHeight(self, h): self.setMaximumHeight(h) @@ -41,10 +48,10 @@ def setFixedWidth(self, h): self.setMaximumWidth(h) self.setMinimumWidth(h) - + def height(self): return self.geometry().height() - + def width(self): return self.geometry().width() @@ -52,19 +59,16 @@ geometry = self.geometry() if geometry != self._previousGeometry: self._painterPathCache = None - br = self.mapRectFromParent(geometry).normalized() self._boundingRectCache = br self._previousGeometry = geometry else: br = self._boundingRectCache - return br - def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise. + def shape(self): p = self._painterPathCache if p is None: self._painterPathCache = p = QtGui.QPainterPath() p.addRect(self.boundingRect()) - return p
{"golden_diff": "diff --git a/pyqtgraph/graphicsItems/GraphicsWidget.py b/pyqtgraph/graphicsItems/GraphicsWidget.py\n--- a/pyqtgraph/graphicsItems/GraphicsWidget.py\n+++ b/pyqtgraph/graphicsItems/GraphicsWidget.py\n@@ -3,36 +3,43 @@\n \n __all__ = ['GraphicsWidget']\n \n+\n class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):\n \n _qtBaseClass = QtWidgets.QGraphicsWidget\n- def __init__(self, *args, **kargs):\n+\n+ def __init__(self, *args, **kwargs):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`\n \n Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. \n Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.\n \"\"\"\n- QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)\n+ QtWidgets.QGraphicsWidget.__init__(self, *args, **kwargs)\n GraphicsItem.__init__(self)\n \n- # cache bouding rect and geometry\n+ # cache bounding rect and geometry\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n- \n- ## done by GraphicsItem init\n- #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()\n+ self.geometryChanged.connect(self._resetCachedProperties)\n+\n+ # done by GraphicsItem init\n+ # GraphicsScene.registerObject(self) # workaround for pyqt bug in GraphicsScene.items()\n \n # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86\n- #def itemChange(self, change, value):\n- ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n- ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here\n- ## The default behavior is just to return the value argument, so we'll do that\n- ## without calling the original method.\n- #ret = value\n- #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n- #self._updateView()\n- #return ret\n+ # def itemChange(self, change, value):\n+ # # BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n+ # # ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) # segv occurs here\n+ # # The default behavior is just to return the value argument, so we'll do that\n+ # # without calling the original method.\n+ # ret = value\n+ # if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n+ # self._updateView()\n+ # return ret\n+\n+ def _resetCachedProperties(self):\n+ self._boundingRectCache = self._previousGeometry = None\n+ self._painterPathCache = None\n \n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n@@ -41,10 +48,10 @@\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n- \n+\n def height(self):\n return self.geometry().height()\n- \n+\n def width(self):\n return self.geometry().width()\n \n@@ -52,19 +59,16 @@\n geometry = self.geometry()\n if geometry != self._previousGeometry:\n self._painterPathCache = None\n- \n br = self.mapRectFromParent(geometry).normalized()\n self._boundingRectCache = br\n self._previousGeometry = geometry\n else:\n br = self._boundingRectCache\n-\n return br\n \n- def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.\n+ def shape(self):\n p = self._painterPathCache\n if p is None:\n self._painterPathCache = p = QtGui.QPainterPath()\n p.addRect(self.boundingRect())\n-\n return p\n", "issue": "Axes zoom area does not resize in 0.12.4\n### Short description\r\nWhen a plot is first generated with a given size, say `width ` and `height` in pixels, the 
entire axes areas on the left and bottom of the plot are zoomable. When the plot size is resized such that the plot is larger (e.g. the window is grabbed along an edge or corner and expanded), only the top `height` portion of the y-axes or the left `width` portion of the x-axis retain the ability to zoom the plot using the scroll wheel. The axes area outside (i.e. the lower portion of the y-axis or the right portion of the x-axis) are not zoomable. If hovering over the plot, not on an axes, there is no issue with zooming. Reverting to 0.12.3 fixes issue.\r\n\r\nI was able to reproduce this in both custom plots and many of the pyqtgraph.example scripts.\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.12.4\r\n * Qt Python binding: PySide2 5.15.2.1 Qt 5.15.2\r\n * Python version: 3.7\r\n * NumPy version: 1.21.6\r\n * Operating system: Windows 10 Enterprise 21H2\r\n * Installation method: pip\r\n\n", "before_files": [{"content": "from ..Qt import QtGui, QtWidgets\nfrom .GraphicsItem import GraphicsItem\n\n__all__ = ['GraphicsWidget']\n\nclass GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):\n \n _qtBaseClass = QtWidgets.QGraphicsWidget\n def __init__(self, *args, **kargs):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`\n \n Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. \n Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.\n \"\"\"\n QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)\n GraphicsItem.__init__(self)\n\n # cache bouding rect and geometry\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n \n ## done by GraphicsItem init\n #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()\n\n # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86\n #def itemChange(self, change, value):\n ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here\n ## The default behavior is just to return the value argument, so we'll do that\n ## without calling the original method.\n #ret = value\n #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n #self._updateView()\n #return ret\n\n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n self.setMinimumHeight(h)\n\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n \n def height(self):\n return self.geometry().height()\n \n def width(self):\n return self.geometry().width()\n\n def boundingRect(self):\n geometry = self.geometry()\n if geometry != self._previousGeometry:\n self._painterPathCache = None\n \n br = self.mapRectFromParent(geometry).normalized()\n self._boundingRectCache = br\n self._previousGeometry = geometry\n else:\n br = self._boundingRectCache\n\n return br\n\n def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.\n p = self._painterPathCache\n if p is None:\n self._painterPathCache = p = QtGui.QPainterPath()\n p.addRect(self.boundingRect())\n\n return p\n", "path": "pyqtgraph/graphicsItems/GraphicsWidget.py"}]}
1,542
915
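The heart of the golden diff above is cache invalidation: `boundingRect()` caches geometry-derived values, and the dead zoom regions suggest those caches could go stale after a resize. A minimal sketch of the signal-driven reset the diff introduces, assuming PySide2 (any Qt binding exposing `QGraphicsWidget.geometryChanged` would do, and a `QApplication` is still needed before instantiating widgets):

```python
from PySide2.QtWidgets import QGraphicsWidget


class CachingGraphicsWidget(QGraphicsWidget):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._boundingRectCache = self._previousGeometry = None
        self._painterPathCache = None
        # geometryChanged fires on every resize, so cached rects/paths are
        # dropped before the next boundingRect()/shape() call reads them.
        self.geometryChanged.connect(self._reset_cached_properties)

    def _reset_cached_properties(self):
        self._boundingRectCache = self._previousGeometry = None
        self._painterPathCache = None
```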
gh_patches_debug_41159
rasdani/github-patches
git_diff
azavea__raster-vision-328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix raster stats bug If you run compute_raster_stats on 4-channel imagery (yielding stats for 4 channels), and use a `channel_order` of [0, 1, 2] in your raster_transformer, and then switch to using 3-channel imagery, it leads to an error because currently the `means` do not have the `channel_order` applied to them before being subtracted from the raster. In other words, 4 channel means is subtracted from a 3 channel raster. </issue> <code> [start of src/rastervision/builders/raster_transformer_builder.py] 1 from rastervision.core.raster_transformer import RasterTransformer 2 3 4 def build(config): 5 return RasterTransformer(config) 6 [end of src/rastervision/builders/raster_transformer_builder.py] [start of src/rastervision/core/raster_transformer.py] 1 import numpy as np 2 3 from rastervision.core.raster_stats import RasterStats 4 5 6 class RasterTransformer(object): 7 """Transforms chips according to a config.""" 8 9 def __init__(self, options): 10 """Construct a new RasterTransformer. 11 12 Args: 13 options: protos.raster_transformer_pb2.RasterTransformer 14 """ 15 self.options = options 16 self.raster_stats = None 17 if options.stats_uri: 18 self.raster_stats = RasterStats() 19 self.raster_stats.load(options.stats_uri) 20 21 def transform(self, chip): 22 """Transform a chip. 23 24 Selects a subset of the channels and transforms non-uint8 to 25 uint8 values using options.stats_uri 26 27 Args: 28 chip: [height, width, channels] numpy array 29 30 Returns: 31 [height, width, channels] uint8 numpy array where channels is equal 32 to len(self.options.channel_order) 33 """ 34 if chip.dtype != np.uint8: 35 if self.raster_stats: 36 # Subtract mean and divide by std to get zscores. 37 means = np.array(self.raster_stats.means) 38 means = means[np.newaxis, np.newaxis, :].astype(np.float) 39 stds = np.array(self.raster_stats.stds) 40 stds = stds[np.newaxis, np.newaxis, :].astype(np.float) 41 42 # Don't transform NODATA zero values. 43 nodata = chip == 0 44 45 chip = chip - means 46 chip = chip / stds 47 48 # Make zscores that fall between -3 and 3 span 0 to 255. 49 chip += 3 50 chip /= 6 51 52 chip = np.clip(chip, 0, 1) 53 chip *= 255 54 chip = chip.astype(np.uint8) 55 56 chip[nodata] = 0 57 else: 58 raise ValueError( 59 'Need to provide stats_uri for non-uint8 rasters.') 60 61 if self.options.channel_order: 62 return chip[:, :, self.options.channel_order] 63 return chip 64 [end of src/rastervision/core/raster_transformer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/rastervision/builders/raster_transformer_builder.py b/src/rastervision/builders/raster_transformer_builder.py --- a/src/rastervision/builders/raster_transformer_builder.py +++ b/src/rastervision/builders/raster_transformer_builder.py @@ -1,5 +1,12 @@ from rastervision.core.raster_transformer import RasterTransformer +from rastervision.core.raster_stats import RasterStats def build(config): - return RasterTransformer(config) + raster_stats = None + if config.stats_uri: + raster_stats = RasterStats() + raster_stats.load(config.stats_uri) + + return RasterTransformer( + channel_order=config.channel_order, raster_stats=raster_stats) diff --git a/src/rastervision/core/raster_transformer.py b/src/rastervision/core/raster_transformer.py --- a/src/rastervision/core/raster_transformer.py +++ b/src/rastervision/core/raster_transformer.py @@ -1,43 +1,50 @@ import numpy as np -from rastervision.core.raster_stats import RasterStats - class RasterTransformer(object): - """Transforms chips according to a config.""" + """Transforms raw chips to be input to a neural network.""" - def __init__(self, options): + def __init__(self, channel_order=None, raster_stats=None): """Construct a new RasterTransformer. Args: - options: protos.raster_transformer_pb2.RasterTransformer + channel_order: numpy array of length n where n is the number of + channels to use and the values are channel indices + raster_stats: (RasterStats) used to transform chip to have + desired statistics """ - self.options = options - self.raster_stats = None - if options.stats_uri: - self.raster_stats = RasterStats() - self.raster_stats.load(options.stats_uri) + self.channel_order = channel_order + self.raster_stats = raster_stats def transform(self, chip): """Transform a chip. Selects a subset of the channels and transforms non-uint8 to - uint8 values using options.stats_uri + uint8 values using raster_stats. Args: chip: [height, width, channels] numpy array Returns: [height, width, channels] uint8 numpy array where channels is equal - to len(self.options.channel_order) + to len(channel_order) """ + if self.channel_order is None: + channel_order = np.arange(chip.shape[2]) + else: + channel_order = self.channel_order + + chip = chip[:, :, channel_order] + if chip.dtype != np.uint8: if self.raster_stats: # Subtract mean and divide by std to get zscores. means = np.array(self.raster_stats.means) - means = means[np.newaxis, np.newaxis, :].astype(np.float) + means = means[np.newaxis, np.newaxis, channel_order].astype( + np.float) stds = np.array(self.raster_stats.stds) - stds = stds[np.newaxis, np.newaxis, :].astype(np.float) + stds = stds[np.newaxis, np.newaxis, channel_order].astype( + np.float) # Don't transform NODATA zero values. nodata = chip == 0 @@ -56,8 +63,6 @@ chip[nodata] = 0 else: raise ValueError( - 'Need to provide stats_uri for non-uint8 rasters.') + 'Need to provide raster_stats for non-uint8 rasters.') - if self.options.channel_order: - return chip[:, :, self.options.channel_order] return chip
{"golden_diff": "diff --git a/src/rastervision/builders/raster_transformer_builder.py b/src/rastervision/builders/raster_transformer_builder.py\n--- a/src/rastervision/builders/raster_transformer_builder.py\n+++ b/src/rastervision/builders/raster_transformer_builder.py\n@@ -1,5 +1,12 @@\n from rastervision.core.raster_transformer import RasterTransformer\n+from rastervision.core.raster_stats import RasterStats\n \n \n def build(config):\n- return RasterTransformer(config)\n+ raster_stats = None\n+ if config.stats_uri:\n+ raster_stats = RasterStats()\n+ raster_stats.load(config.stats_uri)\n+\n+ return RasterTransformer(\n+ channel_order=config.channel_order, raster_stats=raster_stats)\ndiff --git a/src/rastervision/core/raster_transformer.py b/src/rastervision/core/raster_transformer.py\n--- a/src/rastervision/core/raster_transformer.py\n+++ b/src/rastervision/core/raster_transformer.py\n@@ -1,43 +1,50 @@\n import numpy as np\n \n-from rastervision.core.raster_stats import RasterStats\n-\n \n class RasterTransformer(object):\n- \"\"\"Transforms chips according to a config.\"\"\"\n+ \"\"\"Transforms raw chips to be input to a neural network.\"\"\"\n \n- def __init__(self, options):\n+ def __init__(self, channel_order=None, raster_stats=None):\n \"\"\"Construct a new RasterTransformer.\n \n Args:\n- options: protos.raster_transformer_pb2.RasterTransformer\n+ channel_order: numpy array of length n where n is the number of\n+ channels to use and the values are channel indices\n+ raster_stats: (RasterStats) used to transform chip to have\n+ desired statistics\n \"\"\"\n- self.options = options\n- self.raster_stats = None\n- if options.stats_uri:\n- self.raster_stats = RasterStats()\n- self.raster_stats.load(options.stats_uri)\n+ self.channel_order = channel_order\n+ self.raster_stats = raster_stats\n \n def transform(self, chip):\n \"\"\"Transform a chip.\n \n Selects a subset of the channels and transforms non-uint8 to\n- uint8 values using options.stats_uri\n+ uint8 values using raster_stats.\n \n Args:\n chip: [height, width, channels] numpy array\n \n Returns:\n [height, width, channels] uint8 numpy array where channels is equal\n- to len(self.options.channel_order)\n+ to len(channel_order)\n \"\"\"\n+ if self.channel_order is None:\n+ channel_order = np.arange(chip.shape[2])\n+ else:\n+ channel_order = self.channel_order\n+\n+ chip = chip[:, :, channel_order]\n+\n if chip.dtype != np.uint8:\n if self.raster_stats:\n # Subtract mean and divide by std to get zscores.\n means = np.array(self.raster_stats.means)\n- means = means[np.newaxis, np.newaxis, :].astype(np.float)\n+ means = means[np.newaxis, np.newaxis, channel_order].astype(\n+ np.float)\n stds = np.array(self.raster_stats.stds)\n- stds = stds[np.newaxis, np.newaxis, :].astype(np.float)\n+ stds = stds[np.newaxis, np.newaxis, channel_order].astype(\n+ np.float)\n \n # Don't transform NODATA zero values.\n nodata = chip == 0\n@@ -56,8 +63,6 @@\n chip[nodata] = 0\n else:\n raise ValueError(\n- 'Need to provide stats_uri for non-uint8 rasters.')\n+ 'Need to provide raster_stats for non-uint8 rasters.')\n \n- if self.options.channel_order:\n- return chip[:, :, self.options.channel_order]\n return chip\n", "issue": "Fix raster stats bug\nIf you run compute_raster_stats on 4-channel imagery (yielding stats for 4 channels), and use a `channel_order` of [0, 1, 2] in your raster_transformer, and then switch to using 3-channel imagery, it leads to an error because currently the `means` do not have the `channel_order` applied to them before being 
subtracted from the raster. In other words, 4 channel means is subtracted from a 3 channel raster.\n", "before_files": [{"content": "from rastervision.core.raster_transformer import RasterTransformer\n\n\ndef build(config):\n return RasterTransformer(config)\n", "path": "src/rastervision/builders/raster_transformer_builder.py"}, {"content": "import numpy as np\n\nfrom rastervision.core.raster_stats import RasterStats\n\n\nclass RasterTransformer(object):\n \"\"\"Transforms chips according to a config.\"\"\"\n\n def __init__(self, options):\n \"\"\"Construct a new RasterTransformer.\n\n Args:\n options: protos.raster_transformer_pb2.RasterTransformer\n \"\"\"\n self.options = options\n self.raster_stats = None\n if options.stats_uri:\n self.raster_stats = RasterStats()\n self.raster_stats.load(options.stats_uri)\n\n def transform(self, chip):\n \"\"\"Transform a chip.\n\n Selects a subset of the channels and transforms non-uint8 to\n uint8 values using options.stats_uri\n\n Args:\n chip: [height, width, channels] numpy array\n\n Returns:\n [height, width, channels] uint8 numpy array where channels is equal\n to len(self.options.channel_order)\n \"\"\"\n if chip.dtype != np.uint8:\n if self.raster_stats:\n # Subtract mean and divide by std to get zscores.\n means = np.array(self.raster_stats.means)\n means = means[np.newaxis, np.newaxis, :].astype(np.float)\n stds = np.array(self.raster_stats.stds)\n stds = stds[np.newaxis, np.newaxis, :].astype(np.float)\n\n # Don't transform NODATA zero values.\n nodata = chip == 0\n\n chip = chip - means\n chip = chip / stds\n\n # Make zscores that fall between -3 and 3 span 0 to 255.\n chip += 3\n chip /= 6\n\n chip = np.clip(chip, 0, 1)\n chip *= 255\n chip = chip.astype(np.uint8)\n\n chip[nodata] = 0\n else:\n raise ValueError(\n 'Need to provide stats_uri for non-uint8 rasters.')\n\n if self.options.channel_order:\n return chip[:, :, self.options.channel_order]\n return chip\n", "path": "src/rastervision/core/raster_transformer.py"}]}
1,292
855
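The shape mismatch described in the issue above is easy to reproduce with plain NumPy; the sketch below (array values invented for illustration) shows why applying `channel_order` to the stats as well as the chip makes the subtraction well-defined:

```python
import numpy as np

chip = np.random.rand(2, 2, 3)            # a 3-band raster
means = np.array([10., 20., 30., 40.])    # stats computed on 4-band imagery
channel_order = [0, 1, 2]

# Pre-fix, means[np.newaxis, np.newaxis, :] has shape (1, 1, 4), which cannot
# broadcast against the (2, 2, 3) chip. Post-fix, both operands agree on the
# selected channels:
zscores = chip[:, :, channel_order] - means[np.newaxis, np.newaxis, channel_order]
print(zscores.shape)  # (2, 2, 3)
```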
gh_patches_debug_140
rasdani/github-patches
git_diff
d2l-ai__d2l-en-2078
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [MXNet] matplotlib >=3.5 raises TypeError with ax.plot_wireframe in MXNet ndarray With the latest version of matplotlib, multiple notebooks fail with a type error in mxnet (mxnet==1.7.0 & CUDA 10.2). Some of the affected sections include [optimization intro](https://d2l.ai/chapter_optimization/optimization-intro.html), [integral calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/integral-calculus.html), [multivariable calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) etc. ``` TypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>] ``` Please see attached traceback and reproduction instructions below. Steps to reproduce the issue. 1. Setup the d2l environment (using `static/build.yml`) 2. While setting up the environment, it will automatically install the latest version of matplotlib (i.e. `matplotlib==3.5.1` as of today). Run one of the notebooks which is affected (mentioned above) <details> <summary>Click to expand: Error Traceback</summary> ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Input In [7], in <module> 9 # Plot function 10 ax = d2l.plt.figure().add_subplot(111, projection='3d') ---> 11 ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10}) 12 ax.plot_wireframe(x, y, w, **{'rstride': 10, 'cstride': 10}, color='purple') 13 d2l.plt.xlabel('x') File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/_api/deprecation.py:412, in delete_parameter.<locals>.wrapper(*inner_args, **inner_kwargs) 402 deprecation_addendum = ( 403 f"If any parameter follows {name!r}, they should be passed as " 404 f"keyword, not positionally.") 405 warn_deprecated( 406 since, 407 name=repr(name), (...) 410 else deprecation_addendum, 411 **kwargs) --> 412 return func(*inner_args, **inner_kwargs) File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:1908, in Axes3D.plot_wireframe(self, X, Y, Z, *args, **kwargs) 1906 linec = art3d.Line3DCollection(lines, *args, **kwargs) 1907 self.add_collection(linec) -> 1908 self.auto_scale_xyz(X, Y, Z, had_data) 1910 return linec File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:658, in Axes3D.auto_scale_xyz(self, X, Y, Z, had_data) 656 self.xy_dataLim.update_from_data_y(Y, not had_data) 657 if Z is not None: --> 658 self.zz_dataLim.update_from_data_x(Z, not had_data) 659 # Let autoscale_view figure out how to use this data. 660 self.autoscale_view() File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/transforms.py:922, in Bbox.update_from_data_x(self, x, ignore) 906 """ 907 Update the x-bounds of the `Bbox` based on the passed in data. After 908 updating, the bounds will have positive *width*, and *x0* will be the (...) 919 - When ``None``, use the last value passed to :meth:`ignore`. 
920 """ 921 x = np.ravel(x) --> 922 self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]), 923 ignore=ignore, updatey=False) File <__array_function__ internals>:180, in column_stack(*args, **kwargs) TypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>] ``` </details> This is another issue validating the need of #2044. A simple solution for now is to pin the matplotlib version to 1.4. I'll send a PR for this. cc @astonzhang </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 import d2l 3 4 requirements = [ 5 'jupyter', 6 'numpy', 7 'matplotlib==3.4', 8 'requests', 9 'pandas', 10 'gym' 11 ] 12 13 setup( 14 name='d2l', 15 version=d2l.__version__, 16 python_requires='>=3.5', 17 author='D2L Developers', 18 author_email='[email protected]', 19 url='https://d2l.ai', 20 description='Dive into Deep Learning', 21 license='MIT-0', 22 packages=find_packages(), 23 zip_safe=True, 24 install_requires=requirements, 25 ) 26 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ requirements = [ 'jupyter', 'numpy', - 'matplotlib==3.4', + 'matplotlib', 'requests', 'pandas', 'gym'
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n requirements = [\n 'jupyter',\n 'numpy',\n- 'matplotlib==3.4',\n+ 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n", "issue": "[MXNet] matplotlib >=3.5 raises TypeError with ax.plot_wireframe in MXNet ndarray\nWith the latest version of matplotlib, multiple notebooks fail with a type error in mxnet (mxnet==1.7.0 & CUDA 10.2). Some of the affected sections include [optimization intro](https://d2l.ai/chapter_optimization/optimization-intro.html), [integral calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/integral-calculus.html), [multivariable calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) etc.\r\n\r\n```\r\nTypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]\r\n```\r\n\r\nPlease see attached traceback and reproduction instructions below.\r\n\r\nSteps to reproduce the issue.\r\n\r\n1. Setup the d2l environment (using `static/build.yml`)\r\n2. While setting up the environment, it will automatically install the latest version of matplotlib (i.e. `matplotlib==3.5.1` as of today). \r\n\r\nRun one of the notebooks which is affected (mentioned above) \r\n\r\n<details>\r\n <summary>Click to expand: Error Traceback</summary>\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nInput In [7], in <module>\r\n 9 # Plot function\r\n 10 ax = d2l.plt.figure().add_subplot(111, projection='3d')\r\n---> 11 ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10})\r\n 12 ax.plot_wireframe(x, y, w, **{'rstride': 10, 'cstride': 10}, color='purple')\r\n 13 d2l.plt.xlabel('x')\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/_api/deprecation.py:412, in delete_parameter.<locals>.wrapper(*inner_args, **inner_kwargs)\r\n 402 deprecation_addendum = (\r\n 403 f\"If any parameter follows {name!r}, they should be passed as \"\r\n 404 f\"keyword, not positionally.\")\r\n 405 warn_deprecated(\r\n 406 since,\r\n 407 name=repr(name),\r\n (...)\r\n 410 else deprecation_addendum,\r\n 411 **kwargs)\r\n--> 412 return func(*inner_args, **inner_kwargs)\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:1908, in Axes3D.plot_wireframe(self, X, Y, Z, *args, **kwargs)\r\n 1906 linec = art3d.Line3DCollection(lines, *args, **kwargs)\r\n 1907 self.add_collection(linec)\r\n-> 1908 self.auto_scale_xyz(X, Y, Z, had_data)\r\n 1910 return linec\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:658, in Axes3D.auto_scale_xyz(self, X, Y, Z, had_data)\r\n 656 self.xy_dataLim.update_from_data_y(Y, not had_data)\r\n 657 if Z is not None:\r\n--> 658 self.zz_dataLim.update_from_data_x(Z, not had_data)\r\n 659 # Let autoscale_view figure out how to use this data.\r\n 660 self.autoscale_view()\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/transforms.py:922, in Bbox.update_from_data_x(self, x, ignore)\r\n 906 \"\"\"\r\n 907 Update the x-bounds of the `Bbox` based on the passed in data. 
After\r\n 908 updating, the bounds will have positive *width*, and *x0* will be the\r\n (...)\r\n 919 - When ``None``, use the last value passed to :meth:`ignore`.\r\n 920 \"\"\"\r\n 921 x = np.ravel(x)\r\n--> 922 self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),\r\n 923 ignore=ignore, updatey=False)\r\n\r\nFile <__array_function__ internals>:180, in column_stack(*args, **kwargs)\r\n\r\nTypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]\r\n```\r\n\r\n</details>\r\n\r\nThis is another issue validating the need of #2044.\r\n\r\nA simple solution for now is to pin the matplotlib version to 1.4. I'll send a PR for this.\r\n\r\ncc @astonzhang \n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib==3.4',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]}
1,832
71
gh_patches_debug_34829
rasdani/github-patches
git_diff
liqd__adhocracy4-168
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> improve accessibility of image upload * add alt attribute with the filename * if there is no image uploaded the image tag should not be there * the label's `for` attribute doesn't reference the file input's id. The first part causes the HTML to be invalid, which is part of the BITV Test "4.1.1a Valides HTML". The third part is part of the BITV Test "3.3.2a Formularfelder richtig beschriftet". </issue> <code> [start of adhocracy4/images/widgets.py] 1 from os.path import basename 2 3 from django.contrib.staticfiles.storage import staticfiles_storage 4 from django.forms import widgets 5 from django.template import loader 6 from django.utils.html import conditional_escape 7 from django.utils.translation import ugettext 8 9 10 class ImageInputWidget(widgets.ClearableFileInput): 11 12 """ 13 A project-specific improved version of the clearable file upload. 14 15 Allows to upload and delete uploaded files. It doesn't passing attributes 16 using the positional `attrs` argument and hard codes css files. 17 """ 18 class Media: 19 js = (staticfiles_storage.url('a4images/imageUploader.js'),) 20 21 def render(self, name, value, attrs=None): 22 23 has_image_set = self.is_initial(value) 24 is_required = self.is_required 25 26 file_placeholder = ugettext('Select a picture from your local folder.') 27 file_input = super().render(name, None, { 28 'id': name, 29 'class': 'form-control form-control-file' 30 }) 31 32 if has_image_set: 33 file_name = basename(value.name) 34 file_url = conditional_escape(value.url) 35 else: 36 file_name = "" 37 file_url = "" 38 39 text_input = widgets.TextInput().render('__noname__', file_name, { 40 'class': 'form-control form-control-file-dummy', 41 'placeholder': file_placeholder, 42 'tabindex': '-1' 43 }) 44 45 checkbox_id = self.clear_checkbox_id(name) 46 checkbox_name = self.clear_checkbox_name(name) 47 checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, { 48 'id': checkbox_id, 49 'class': 'clear-image', 50 'data-upload-clear': name, 51 }) 52 53 context = { 54 'name': name, 55 'has_image_set': has_image_set, 56 'is_required': is_required, 57 'file_url': file_url, 58 'file_input': file_input, 59 'file_id': name + '-file', 60 'text_input': text_input, 61 'checkbox_input': checkbox_input, 62 'checkbox_id': checkbox_id 63 } 64 65 return loader.render_to_string( 66 'a4images/image_upload_widget.html', 67 context 68 ) 69 70 def value_from_datadict(self, data, files, name): 71 """ 72 Modify value_from_datadict, so that delete takes precedence over 73 upload. 74 """ 75 file_value = super(widgets.ClearableFileInput, self)\ 76 .value_from_datadict(data, files, name) 77 checkbox_value = widgets.CheckboxInput()\ 78 .value_from_datadict(data, files, self.clear_checkbox_name(name)) 79 if not self.is_required and checkbox_value: 80 return False 81 return file_value 82 [end of adhocracy4/images/widgets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/images/widgets.py b/adhocracy4/images/widgets.py --- a/adhocracy4/images/widgets.py +++ b/adhocracy4/images/widgets.py @@ -19,13 +19,13 @@ js = (staticfiles_storage.url('a4images/imageUploader.js'),) def render(self, name, value, attrs=None): - + html_id = attrs and attrs.get('id', name) or name has_image_set = self.is_initial(value) is_required = self.is_required file_placeholder = ugettext('Select a picture from your local folder.') file_input = super().render(name, None, { - 'id': name, + 'id': html_id, 'class': 'form-control form-control-file' }) @@ -39,7 +39,8 @@ text_input = widgets.TextInput().render('__noname__', file_name, { 'class': 'form-control form-control-file-dummy', 'placeholder': file_placeholder, - 'tabindex': '-1' + 'tabindex': '-1', + 'id': 'text-{}'.format(html_id) }) checkbox_id = self.clear_checkbox_id(name) @@ -47,16 +48,16 @@ checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, { 'id': checkbox_id, 'class': 'clear-image', - 'data-upload-clear': name, + 'data-upload-clear': html_id, }) context = { - 'name': name, + 'id': html_id, 'has_image_set': has_image_set, 'is_required': is_required, 'file_url': file_url, 'file_input': file_input, - 'file_id': name + '-file', + 'file_id': html_id + '-file', 'text_input': text_input, 'checkbox_input': checkbox_input, 'checkbox_id': checkbox_id
{"golden_diff": "diff --git a/adhocracy4/images/widgets.py b/adhocracy4/images/widgets.py\n--- a/adhocracy4/images/widgets.py\n+++ b/adhocracy4/images/widgets.py\n@@ -19,13 +19,13 @@\n js = (staticfiles_storage.url('a4images/imageUploader.js'),)\n \n def render(self, name, value, attrs=None):\n-\n+ html_id = attrs and attrs.get('id', name) or name\n has_image_set = self.is_initial(value)\n is_required = self.is_required\n \n file_placeholder = ugettext('Select a picture from your local folder.')\n file_input = super().render(name, None, {\n- 'id': name,\n+ 'id': html_id,\n 'class': 'form-control form-control-file'\n })\n \n@@ -39,7 +39,8 @@\n text_input = widgets.TextInput().render('__noname__', file_name, {\n 'class': 'form-control form-control-file-dummy',\n 'placeholder': file_placeholder,\n- 'tabindex': '-1'\n+ 'tabindex': '-1',\n+ 'id': 'text-{}'.format(html_id)\n })\n \n checkbox_id = self.clear_checkbox_id(name)\n@@ -47,16 +48,16 @@\n checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {\n 'id': checkbox_id,\n 'class': 'clear-image',\n- 'data-upload-clear': name,\n+ 'data-upload-clear': html_id,\n })\n \n context = {\n- 'name': name,\n+ 'id': html_id,\n 'has_image_set': has_image_set,\n 'is_required': is_required,\n 'file_url': file_url,\n 'file_input': file_input,\n- 'file_id': name + '-file',\n+ 'file_id': html_id + '-file',\n 'text_input': text_input,\n 'checkbox_input': checkbox_input,\n 'checkbox_id': checkbox_id\n", "issue": "improve accessibility of image upload\n* add alt attribute with the filename\r\n* if there is no image uploaded the image tag should not be there\r\n* the label's `for` attribute doesn't reference the file input's id.\r\n\r\nThe first part causes the HTML to be invalid, which is part of the BITV Test \"4.1.1a Valides HTML\".\r\nThe third part is part of the BITV Test \"3.3.2a Formularfelder richtig beschriftet\".\n", "before_files": [{"content": "from os.path import basename\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets\nfrom django.template import loader\nfrom django.utils.html import conditional_escape\nfrom django.utils.translation import ugettext\n\n\nclass ImageInputWidget(widgets.ClearableFileInput):\n\n \"\"\"\n A project-specific improved version of the clearable file upload.\n\n Allows to upload and delete uploaded files. 
It doesn't passing attributes\n using the positional `attrs` argument and hard codes css files.\n \"\"\"\n class Media:\n js = (staticfiles_storage.url('a4images/imageUploader.js'),)\n\n def render(self, name, value, attrs=None):\n\n has_image_set = self.is_initial(value)\n is_required = self.is_required\n\n file_placeholder = ugettext('Select a picture from your local folder.')\n file_input = super().render(name, None, {\n 'id': name,\n 'class': 'form-control form-control-file'\n })\n\n if has_image_set:\n file_name = basename(value.name)\n file_url = conditional_escape(value.url)\n else:\n file_name = \"\"\n file_url = \"\"\n\n text_input = widgets.TextInput().render('__noname__', file_name, {\n 'class': 'form-control form-control-file-dummy',\n 'placeholder': file_placeholder,\n 'tabindex': '-1'\n })\n\n checkbox_id = self.clear_checkbox_id(name)\n checkbox_name = self.clear_checkbox_name(name)\n checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {\n 'id': checkbox_id,\n 'class': 'clear-image',\n 'data-upload-clear': name,\n })\n\n context = {\n 'name': name,\n 'has_image_set': has_image_set,\n 'is_required': is_required,\n 'file_url': file_url,\n 'file_input': file_input,\n 'file_id': name + '-file',\n 'text_input': text_input,\n 'checkbox_input': checkbox_input,\n 'checkbox_id': checkbox_id\n }\n\n return loader.render_to_string(\n 'a4images/image_upload_widget.html',\n context\n )\n\n def value_from_datadict(self, data, files, name):\n \"\"\"\n Modify value_from_datadict, so that delete takes precedence over\n upload.\n \"\"\"\n file_value = super(widgets.ClearableFileInput, self)\\\n .value_from_datadict(data, files, name)\n checkbox_value = widgets.CheckboxInput()\\\n .value_from_datadict(data, files, self.clear_checkbox_name(name))\n if not self.is_required and checkbox_value:\n return False\n return file_value\n", "path": "adhocracy4/images/widgets.py"}]}
1,367
437
gh_patches_debug_15533
rasdani/github-patches
git_diff
voxel51__fiftyone-1660
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Support Fortran ordered masks in the App Currently fortran ordered masks are flipped. ```py import fiftyone as fo import fiftyone.zoo as foz import numpy as np dataset = foz.load_zoo_dataset("quickstart", max_samples=1).select_fields().clone() sample = dataset.first() contiguous = np.asarray([[True, False], [True, False]]) sample["contiguous"] = fo.Segmentation(mask=contiguous) sample["fortran"] = fo.Segmentation(mask=np.asfortranarray(contiguous)) sample.save() session = fo.Session(dataset) ``` <img width="1792" alt="flipped" src="https://user-images.githubusercontent.com/19821840/159953546-5eef71bc-d111-4667-a271-6c4e34e1b7da.png"> </issue> <code> [start of fiftyone/server/json_util.py] 1 """ 2 FiftyOne server json utilies. 3 4 | Copyright 2017-2022, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 from bson import ObjectId, json_util 9 from collections import OrderedDict 10 from datetime import date, datetime 11 from json import JSONEncoder 12 import math 13 14 from fiftyone.core.sample import Sample, SampleView 15 from fiftyone.core.stages import ViewStage 16 import fiftyone.core.utils as fou 17 18 19 _MASK_CLASSES = {"Detection", "Heatmap", "Segmentation"} 20 21 22 def _handle_bytes(o): 23 for k, v in o.items(): 24 if isinstance(v, bytes): 25 o[k] = str(fou.deserialize_numpy_array(v).shape) 26 elif isinstance(v, dict): 27 o[k] = _handle_bytes(v) 28 29 return o 30 31 32 def _handle_numpy_array(raw, _cls=None): 33 if _cls not in _MASK_CLASSES: 34 return str(fou.deserialize_numpy_array(raw).shape) 35 36 return fou.serialize_numpy_array( 37 fou.deserialize_numpy_array(raw), ascii=True 38 ) 39 40 41 def _handle_date(dt): 42 return { 43 "_cls": "DateTime", 44 "datetime": fou.datetime_to_timestamp(dt), 45 } 46 47 48 def _is_invalid_number(value): 49 if not isinstance(value, float): 50 return False 51 52 return math.isnan(value) or math.isinf(value) 53 54 55 def convert(d): 56 if isinstance(d, (dict, OrderedDict)): 57 for k, v in d.items(): 58 if isinstance(v, bytes): 59 d[k] = _handle_numpy_array(v, d.get("_cls", None)) 60 elif isinstance(v, (date, datetime)): 61 d[k] = _handle_date(v) 62 elif isinstance(v, ObjectId): 63 d[k] = str(v) 64 elif isinstance(v, (dict, OrderedDict, list)): 65 convert(v) 66 elif _is_invalid_number(v): 67 d[k] = str(v) 68 69 if isinstance(d, list): 70 for idx, i in enumerate(d): 71 if isinstance(i, tuple): 72 d[idx] = list(i) 73 i = d[idx] 74 75 if isinstance(i, bytes): 76 d[idx] = _handle_numpy_array(i) 77 elif isinstance(i, (date, datetime)): 78 d[idx] = _handle_date(i) 79 elif isinstance(i, ObjectId): 80 d[idx] = str(i) 81 elif isinstance(i, (dict, OrderedDict, list)): 82 convert(i) 83 elif _is_invalid_number(i): 84 d[idx] = str(i) 85 86 87 class FiftyOneJSONEncoder(JSONEncoder): 88 """JSON encoder for the FiftyOne server. 89 90 Any classes with non-standard serialization methods should 91 be accounted for in the `default()` method. 
92 """ 93 94 def default(self, o): # pylint: disable=E0202 95 """Returns the serialized representation of the objects 96 97 Args: 98 o: the object 99 100 Returns: 101 str 102 """ 103 if isinstance(o, (Sample, SampleView)): 104 return _handle_bytes(o.to_mongo_dict(include_id=True)) 105 if issubclass(type(o), ViewStage): 106 return o._serialize() 107 if isinstance(o, ObjectId): 108 return str(o) 109 if isinstance(o, float): 110 return json_util.dumps(o) 111 return super().default(o) 112 113 @staticmethod 114 def dumps(*args, **kwargs): 115 """Defined for overriding the default SocketIO `json` interface""" 116 kwargs["cls"] = FiftyOneJSONEncoder 117 return json_util.dumps( 118 json_util.loads( 119 json_util.dumps(*args, **kwargs), parse_constant=lambda c: c 120 ), 121 **kwargs 122 ) 123 124 @staticmethod 125 def loads(*args, **kwargs): 126 """Defined for overriding the default SocketIO `json` interface""" 127 return json_util.loads(*args, **kwargs) 128 [end of fiftyone/server/json_util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py --- a/fiftyone/server/json_util.py +++ b/fiftyone/server/json_util.py @@ -10,6 +10,7 @@ from datetime import date, datetime from json import JSONEncoder import math +import numpy as np from fiftyone.core.sample import Sample, SampleView from fiftyone.core.stages import ViewStage @@ -33,9 +34,12 @@ if _cls not in _MASK_CLASSES: return str(fou.deserialize_numpy_array(raw).shape) - return fou.serialize_numpy_array( - fou.deserialize_numpy_array(raw), ascii=True - ) + array = fou.deserialize_numpy_array(raw) + + if np.isfortran(array): + array = np.ascontiguousarray(array) + + return fou.serialize_numpy_array(array, ascii=True) def _handle_date(dt):
{"golden_diff": "diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py\n--- a/fiftyone/server/json_util.py\n+++ b/fiftyone/server/json_util.py\n@@ -10,6 +10,7 @@\n from datetime import date, datetime\n from json import JSONEncoder\n import math\n+import numpy as np\n \n from fiftyone.core.sample import Sample, SampleView\n from fiftyone.core.stages import ViewStage\n@@ -33,9 +34,12 @@\n if _cls not in _MASK_CLASSES:\n return str(fou.deserialize_numpy_array(raw).shape)\n \n- return fou.serialize_numpy_array(\n- fou.deserialize_numpy_array(raw), ascii=True\n- )\n+ array = fou.deserialize_numpy_array(raw)\n+\n+ if np.isfortran(array):\n+ array = np.ascontiguousarray(array)\n+\n+ return fou.serialize_numpy_array(array, ascii=True)\n \n \n def _handle_date(dt):\n", "issue": "[BUG] Support Fortran ordered masks in the App\nCurrently fortran ordered masks are flipped.\r\n\r\n```py\r\nimport fiftyone as fo\r\nimport fiftyone.zoo as foz\r\nimport numpy as np\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart\", max_samples=1).select_fields().clone()\r\nsample = dataset.first()\r\n\r\ncontiguous = np.asarray([[True, False], [True, False]])\r\nsample[\"contiguous\"] = fo.Segmentation(mask=contiguous)\r\nsample[\"fortran\"] = fo.Segmentation(mask=np.asfortranarray(contiguous))\r\nsample.save()\r\n\r\nsession = fo.Session(dataset)\r\n```\r\n<img width=\"1792\" alt=\"flipped\" src=\"https://user-images.githubusercontent.com/19821840/159953546-5eef71bc-d111-4667-a271-6c4e34e1b7da.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2022, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom collections import OrderedDict\nfrom datetime import date, datetime\nfrom json import JSONEncoder\nimport math\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\nimport fiftyone.core.utils as fou\n\n\n_MASK_CLASSES = {\"Detection\", \"Heatmap\", \"Segmentation\"}\n\n\ndef _handle_bytes(o):\n for k, v in o.items():\n if isinstance(v, bytes):\n o[k] = str(fou.deserialize_numpy_array(v).shape)\n elif isinstance(v, dict):\n o[k] = _handle_bytes(v)\n\n return o\n\n\ndef _handle_numpy_array(raw, _cls=None):\n if _cls not in _MASK_CLASSES:\n return str(fou.deserialize_numpy_array(raw).shape)\n\n return fou.serialize_numpy_array(\n fou.deserialize_numpy_array(raw), ascii=True\n )\n\n\ndef _handle_date(dt):\n return {\n \"_cls\": \"DateTime\",\n \"datetime\": fou.datetime_to_timestamp(dt),\n }\n\n\ndef _is_invalid_number(value):\n if not isinstance(value, float):\n return False\n\n return math.isnan(value) or math.isinf(value)\n\n\ndef convert(d):\n if isinstance(d, (dict, OrderedDict)):\n for k, v in d.items():\n if isinstance(v, bytes):\n d[k] = _handle_numpy_array(v, d.get(\"_cls\", None))\n elif isinstance(v, (date, datetime)):\n d[k] = _handle_date(v)\n elif isinstance(v, ObjectId):\n d[k] = str(v)\n elif isinstance(v, (dict, OrderedDict, list)):\n convert(v)\n elif _is_invalid_number(v):\n d[k] = str(v)\n\n if isinstance(d, list):\n for idx, i in enumerate(d):\n if isinstance(i, tuple):\n d[idx] = list(i)\n i = d[idx]\n\n if isinstance(i, bytes):\n d[idx] = _handle_numpy_array(i)\n elif isinstance(i, (date, datetime)):\n d[idx] = _handle_date(i)\n elif isinstance(i, ObjectId):\n d[idx] = str(i)\n elif isinstance(i, (dict, OrderedDict, list)):\n convert(i)\n elif _is_invalid_number(i):\n d[idx] = str(i)\n\n\nclass 
FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return _handle_bytes(o.to_mongo_dict(include_id=True))\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}]}
1,847
205
gh_patches_debug_1764
rasdani/github-patches
git_diff
apple__coremltools-298
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Why is six pinned to 1.10.0? Is there any reason for [six to be pinned to version 1.10.0](https://github.com/apple/coremltools/blob/master/setup.py#L44). This gives transitive dependency issues sometimes. /cc @mats-claassen </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 import os 4 from setuptools import setup 5 6 README = os.path.join(os.getcwd(), "README.rst") 7 8 9 with open(README) as f: 10 long_description = f.read() 11 12 setup(name='coremltools', 13 version='2.0', 14 description='Community Tools for CoreML', 15 long_description=long_description, 16 author='Apple Inc.', 17 author_email='[email protected]', 18 url='', 19 packages=[ 20 'coremltools', 21 'coremltools._deps', 22 'coremltools.converters', 23 'coremltools.converters.caffe', 24 'coremltools.converters.sklearn', 25 'coremltools.converters.xgboost', 26 'coremltools.converters.libsvm', 27 'coremltools.converters.keras', 28 'coremltools.graph_visualization', 29 'coremltools.models', 30 'coremltools.models.neural_network', 31 'coremltools.proto', 32 'coremltools._scripts' 33 ], 34 package_data={'': ['LICENSE.txt', 'README.rst', 'libcaffeconverter.so', 'libcoremlpython.so'], 35 'coremltools': ['graph_visualization/__init__.py', 36 'graph_visualization/app.js', 37 'graph_visualization/index.html', 38 'graph_visualization/style.css', 39 'graph_visualization/assets/*', 40 'graph_visualization/icons/*'] 41 }, 42 install_requires=[ 43 'numpy >= 1.10.0', 44 'protobuf >= 3.1.0', 45 'six==1.10.0' 46 ], 47 entry_points = { 48 'console_scripts': ['coremlconverter = coremltools:_main'] 49 }, 50 classifiers=[ 51 'Development Status :: 4 - Beta', 52 'Intended Audience :: End Users/Desktop', 53 'Intended Audience :: Developers', 54 'Operating System :: MacOS :: MacOS X', 55 'Programming Language :: Python :: 2.7', 56 'Programming Language :: Python :: 3.5', 57 'Programming Language :: Python :: 3.6', 58 'Topic :: Scientific/Engineering', 59 'Topic :: Software Development' 60 ], 61 license='BSD' 62 ) 63 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ install_requires=[ 'numpy >= 1.10.0', 'protobuf >= 3.1.0', - 'six==1.10.0' + 'six>=1.10.0' ], entry_points = { 'console_scripts': ['coremlconverter = coremltools:_main']
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n install_requires=[\n 'numpy >= 1.10.0',\n 'protobuf >= 3.1.0',\n- 'six==1.10.0'\n+ 'six>=1.10.0'\n ],\n entry_points = {\n 'console_scripts': ['coremlconverter = coremltools:_main']\n", "issue": "Why is six pinned to 1.10.0?\nIs there any reason for [six to be pinned to version 1.10.0](https://github.com/apple/coremltools/blob/master/setup.py#L44). This gives transitive dependency issues sometimes.\r\n\r\n/cc @mats-claassen\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\n\nREADME = os.path.join(os.getcwd(), \"README.rst\")\n\n\nwith open(README) as f:\n long_description = f.read()\n\nsetup(name='coremltools',\n version='2.0',\n description='Community Tools for CoreML',\n long_description=long_description,\n author='Apple Inc.',\n author_email='[email protected]',\n url='',\n packages=[\n 'coremltools',\n 'coremltools._deps',\n 'coremltools.converters',\n 'coremltools.converters.caffe',\n 'coremltools.converters.sklearn',\n 'coremltools.converters.xgboost',\n 'coremltools.converters.libsvm',\n 'coremltools.converters.keras',\n 'coremltools.graph_visualization',\n 'coremltools.models',\n 'coremltools.models.neural_network',\n 'coremltools.proto',\n 'coremltools._scripts'\n ],\n package_data={'': ['LICENSE.txt', 'README.rst', 'libcaffeconverter.so', 'libcoremlpython.so'],\n 'coremltools': ['graph_visualization/__init__.py',\n 'graph_visualization/app.js',\n 'graph_visualization/index.html',\n 'graph_visualization/style.css',\n 'graph_visualization/assets/*',\n 'graph_visualization/icons/*']\n },\n install_requires=[\n 'numpy >= 1.10.0',\n 'protobuf >= 3.1.0',\n 'six==1.10.0'\n ],\n entry_points = {\n 'console_scripts': ['coremlconverter = coremltools:_main']\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development'\n ],\n license='BSD'\n)\n", "path": "setup.py"}]}
1,176
107
gh_patches_debug_36300
rasdani/github-patches
git_diff
fidals__shopelectro-767
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use SiteDriver class instead of seleniumrequests.Remote It will bring ability to use `shopelectro.selenium` classes in tests. </issue> <code> [start of shopelectro/selenium/pages/order.py] 1 from shopelectro.models import PaymentOptions 2 from shopelectro.selenium.elements import Input, Button 3 from shopelectro.selenium.pages import Page 4 5 from selenium.webdriver.common.by import By 6 7 from pages.models import CustomPage 8 9 # @todo #682:120m Implement and reuse shopelectro.selenium.OrderPage for selenium tests. 10 11 12 class OrderPage(Page): 13 14 def __init__(self, driver): 15 super().__init__(driver) 16 self.submit_button = Button(self.driver, (By.ID, 'submit-order')) 17 18 @property 19 def path(self): 20 return CustomPage.objects.get(slug='order').url 21 22 def fill_contacts( 23 self, name='Name', city='Санкт-Петербург', phone='2222222222', email='[email protected]', 24 ): 25 contacts = { 26 'id_name': name, 27 'id_city': city, 28 'id_phone': phone, 29 'id_email': email, 30 } 31 32 for id_, value in contacts.items(): 33 Input(self.driver, (By.ID, id_)).send_keys(value) 34 35 def make_order(self): 36 self.submit_button.click() 37 38 def select_payment_type(self, payment_option: PaymentOptions): 39 if payment_option not in PaymentOptions: 40 raise ValueError( 41 'An invalid payment type provided.' 42 f'It should be one of: {PaymentOptions}' 43 ) 44 45 item = Button( 46 self.driver, 47 (By.CSS, f'input[name="payment_type"][value="{payment_option.name}"]'), 48 ) 49 item.click() 50 [end of shopelectro/selenium/pages/order.py] [start of shopelectro/selenium/pages/page.py] 1 from shopelectro.selenium import SiteDriver 2 3 from selenium.webdriver.common.by import By 4 from selenium.webdriver.support import expected_conditions as EC 5 6 7 class Page: 8 """ 9 Represent a typical Shopelectro's page. 10 11 Contains cross-page elements: header, footer, ... 12 """ 13 14 def __init__(self, driver: SiteDriver): 15 if not isinstance(driver, SiteDriver): 16 raise TypeError('Driver must be an instance of shopelectro.selenium.SiteDriver') 17 self.driver = driver 18 self.path: str 19 20 def load(self): 21 if not self.path: 22 raise ValueError(f'Set a page path to {self.__class__.__name__}') 23 self.driver.get(self.path) 24 self.driver.wait.until(EC.visibility_of_element_located( 25 (By.TAG_NAME, 'body') 26 )) 27 [end of shopelectro/selenium/pages/page.py] [start of shopelectro/selenium/pages/success.py] 1 from shopelectro.selenium.pages import Page 2 3 from pages.models import CustomPage 4 5 6 class SuccessPage(Page): 7 8 @property 9 def path(self): 10 CustomPage.objects.get(slug='order-success').url 11 12 def is_success(self): 13 return 'Заказ принят' in self.driver.find_element_by_tag_name('h1').text 14 [end of shopelectro/selenium/pages/success.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/selenium/pages/order.py b/shopelectro/selenium/pages/order.py --- a/shopelectro/selenium/pages/order.py +++ b/shopelectro/selenium/pages/order.py @@ -3,6 +3,7 @@ from shopelectro.selenium.pages import Page from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC from pages.models import CustomPage @@ -34,6 +35,7 @@ def make_order(self): self.submit_button.click() + self.driver.wait.until(EC.url_changes(self.path)) def select_payment_type(self, payment_option: PaymentOptions): if payment_option not in PaymentOptions: diff --git a/shopelectro/selenium/pages/page.py b/shopelectro/selenium/pages/page.py --- a/shopelectro/selenium/pages/page.py +++ b/shopelectro/selenium/pages/page.py @@ -1,3 +1,5 @@ +from functools import wraps + from shopelectro.selenium import SiteDriver from selenium.webdriver.common.by import By @@ -17,10 +19,17 @@ self.driver = driver self.path: str + def wait_loaded(self): + def loaded(driver): + is_sync = EC.url_contains(self.path) + is_rendered = EC.visibility_of_element_located( + (By.TAG_NAME, 'body') + ) + return is_sync(driver) and is_rendered(driver) + self.driver.wait.until(loaded) + def load(self): if not self.path: raise ValueError(f'Set a page path to {self.__class__.__name__}') self.driver.get(self.path) - self.driver.wait.until(EC.visibility_of_element_located( - (By.TAG_NAME, 'body') - )) + self.wait_loaded() diff --git a/shopelectro/selenium/pages/success.py b/shopelectro/selenium/pages/success.py --- a/shopelectro/selenium/pages/success.py +++ b/shopelectro/selenium/pages/success.py @@ -1,3 +1,6 @@ +from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC + from shopelectro.selenium.pages import Page from pages.models import CustomPage @@ -7,7 +10,10 @@ @property def path(self): - CustomPage.objects.get(slug='order-success').url + return CustomPage.objects.get(slug='order-success').url def is_success(self): - return 'Заказ принят' in self.driver.find_element_by_tag_name('h1').text + h1 = self.driver.wait.until( + EC.visibility_of_element_located((By.TAG_NAME, 'h1')) + ).text + return 'Заказ принят' in h1
{"golden_diff": "diff --git a/shopelectro/selenium/pages/order.py b/shopelectro/selenium/pages/order.py\n--- a/shopelectro/selenium/pages/order.py\n+++ b/shopelectro/selenium/pages/order.py\n@@ -3,6 +3,7 @@\n from shopelectro.selenium.pages import Page\n \n from selenium.webdriver.common.by import By\n+from selenium.webdriver.support import expected_conditions as EC\n \n from pages.models import CustomPage\n \n@@ -34,6 +35,7 @@\n \n def make_order(self):\n self.submit_button.click()\n+ self.driver.wait.until(EC.url_changes(self.path))\n \n def select_payment_type(self, payment_option: PaymentOptions):\n if payment_option not in PaymentOptions:\ndiff --git a/shopelectro/selenium/pages/page.py b/shopelectro/selenium/pages/page.py\n--- a/shopelectro/selenium/pages/page.py\n+++ b/shopelectro/selenium/pages/page.py\n@@ -1,3 +1,5 @@\n+from functools import wraps\n+\n from shopelectro.selenium import SiteDriver\n \n from selenium.webdriver.common.by import By\n@@ -17,10 +19,17 @@\n self.driver = driver\n self.path: str\n \n+ def wait_loaded(self):\n+ def loaded(driver):\n+ is_sync = EC.url_contains(self.path)\n+ is_rendered = EC.visibility_of_element_located(\n+ (By.TAG_NAME, 'body')\n+ )\n+ return is_sync(driver) and is_rendered(driver)\n+ self.driver.wait.until(loaded)\n+\n def load(self):\n if not self.path:\n raise ValueError(f'Set a page path to {self.__class__.__name__}')\n self.driver.get(self.path)\n- self.driver.wait.until(EC.visibility_of_element_located(\n- (By.TAG_NAME, 'body')\n- ))\n+ self.wait_loaded()\ndiff --git a/shopelectro/selenium/pages/success.py b/shopelectro/selenium/pages/success.py\n--- a/shopelectro/selenium/pages/success.py\n+++ b/shopelectro/selenium/pages/success.py\n@@ -1,3 +1,6 @@\n+from selenium.webdriver.common.by import By\n+from selenium.webdriver.support import expected_conditions as EC\n+\n from shopelectro.selenium.pages import Page\n \n from pages.models import CustomPage\n@@ -7,7 +10,10 @@\n \n @property\n def path(self):\n- CustomPage.objects.get(slug='order-success').url\n+ return CustomPage.objects.get(slug='order-success').url\n \n def is_success(self):\n- return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in self.driver.find_element_by_tag_name('h1').text\n+ h1 = self.driver.wait.until(\n+ EC.visibility_of_element_located((By.TAG_NAME, 'h1'))\n+ ).text\n+ return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in h1\n", "issue": "Use SiteDriver class instead of seleniumrequests.Remote\nIt will bring ability to use `shopelectro.selenium` classes in tests. 
\n", "before_files": [{"content": "from shopelectro.models import PaymentOptions\nfrom shopelectro.selenium.elements import Input, Button\nfrom shopelectro.selenium.pages import Page\n\nfrom selenium.webdriver.common.by import By\n\nfrom pages.models import CustomPage\n\n# @todo #682:120m Implement and reuse shopelectro.selenium.OrderPage for selenium tests.\n\n\nclass OrderPage(Page):\n\n def __init__(self, driver):\n super().__init__(driver)\n self.submit_button = Button(self.driver, (By.ID, 'submit-order'))\n\n @property\n def path(self):\n return CustomPage.objects.get(slug='order').url\n\n def fill_contacts(\n self, name='Name', city='\u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433', phone='2222222222', email='[email protected]',\n ):\n contacts = {\n 'id_name': name,\n 'id_city': city,\n 'id_phone': phone,\n 'id_email': email,\n }\n\n for id_, value in contacts.items():\n Input(self.driver, (By.ID, id_)).send_keys(value)\n\n def make_order(self):\n self.submit_button.click()\n\n def select_payment_type(self, payment_option: PaymentOptions):\n if payment_option not in PaymentOptions:\n raise ValueError(\n 'An invalid payment type provided.'\n f'It should be one of: {PaymentOptions}'\n )\n\n item = Button(\n self.driver,\n (By.CSS, f'input[name=\"payment_type\"][value=\"{payment_option.name}\"]'),\n )\n item.click()\n", "path": "shopelectro/selenium/pages/order.py"}, {"content": "from shopelectro.selenium import SiteDriver\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Page:\n \"\"\"\n Represent a typical Shopelectro's page.\n\n Contains cross-page elements: header, footer, ...\n \"\"\"\n\n def __init__(self, driver: SiteDriver):\n if not isinstance(driver, SiteDriver):\n raise TypeError('Driver must be an instance of shopelectro.selenium.SiteDriver')\n self.driver = driver\n self.path: str\n\n def load(self):\n if not self.path:\n raise ValueError(f'Set a page path to {self.__class__.__name__}')\n self.driver.get(self.path)\n self.driver.wait.until(EC.visibility_of_element_located(\n (By.TAG_NAME, 'body')\n ))\n", "path": "shopelectro/selenium/pages/page.py"}, {"content": "from shopelectro.selenium.pages import Page\n\nfrom pages.models import CustomPage\n\n\nclass SuccessPage(Page):\n\n @property\n def path(self):\n CustomPage.objects.get(slug='order-success').url\n\n def is_success(self):\n return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in self.driver.find_element_by_tag_name('h1').text\n", "path": "shopelectro/selenium/pages/success.py"}]}
1,369
630
gh_patches_debug_564
rasdani/github-patches
git_diff
mabel-dev__opteryx-1695
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ✨ Memory Pool Optimizations ### Thanks for stopping by to let us know something could be better! **Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_ **Describe the solution you'd like** _A clear and concise description of what you want to happen._ **Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._ **Additional context** _Add any other context or screenshots about the feature request here._ </issue> <code> [start of opteryx/__version__.py] 1 __build__ = 527 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 Store the version here so: 17 1) we don't load dependencies by storing it in __init__.py 18 2) we can import it in setup.py for the same reason 19 """ 20 from enum import Enum # isort: skip 21 22 23 class VersionStatus(Enum): 24 ALPHA = "alpha" 25 BETA = "beta" 26 RELEASE = "release" 27 28 29 _major = 0 30 _minor = 16 31 _revision = 0 32 _status = VersionStatus.ALPHA 33 34 __author__ = "@joocer" 35 __version__ = f"{_major}.{_minor}.{_revision}" + ( 36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else "" 37 ) 38 [end of opteryx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opteryx/__version__.py b/opteryx/__version__.py --- a/opteryx/__version__.py +++ b/opteryx/__version__.py @@ -1,4 +1,4 @@ -__build__ = 527 +__build__ = 532 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 527\n+__build__ = 532\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n", "issue": "\u2728 Memory Pool Optimizations\n### Thanks for stopping by to let us know something could be better!\r\n\r\n**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_\r\n\r\n**Describe the solution you'd like** _A clear and concise description of what you want to happen._\r\n\r\n**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._\r\n\r\n**Additional context** _Add any other context or screenshots about the feature request here._\r\n\n", "before_files": [{"content": "__build__ = 527\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
1,013
102
gh_patches_debug_308
rasdani/github-patches
git_diff
zulip__zulip-13077
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade pip from 19.1.1 and pip-tools from 3.8.0 Followup issue from #13067. pip-tools 3.9.0 or 4.0.0 fails to resolve dependencies from Git URLs (jazzband/pip-tools#851): `pip._internal.exceptions.DistributionNotFound: No matching distribution found for zulip==0.6.1_git (from -r requirements/common.in (line 135))` while pip 19.2 breaks pip-tools 3.8.0 (jazzband/pip-tools#853): `TypeError: __init__() got an unexpected keyword argument 'find_links'` </issue> <code> [start of version.py] 1 import os 2 3 ZULIP_VERSION = "2.0.4+git" 4 # Add information on number of commits and commit hash to version, if available 5 zulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version') 6 if os.path.exists(zulip_git_version_file): 7 with open(zulip_git_version_file) as f: 8 version = f.read().strip() 9 if version: 10 ZULIP_VERSION = version 11 12 LATEST_MAJOR_VERSION = "2.0" 13 LATEST_RELEASE_VERSION = "2.0.4" 14 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/" 15 16 # Bump the minor PROVISION_VERSION to indicate that folks should provision 17 # only when going from an old version of the code to a newer version. Bump 18 # the major version to indicate that folks should provision in both 19 # directions. 20 21 # Typically, 22 # * adding a dependency only requires a minor version bump; 23 # * removing a dependency requires a major version bump; 24 # * upgrading a dependency requires a major version bump, unless the 25 # upgraded dependency is backwards compatible with all of our 26 # historical commits sharing the same major version, in which case a 27 # minor version bump suffices. 28 29 PROVISION_VERSION = '49.2' 30 [end of version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -26,4 +26,4 @@ # historical commits sharing the same major version, in which case a # minor version bump suffices. -PROVISION_VERSION = '49.2' +PROVISION_VERSION = '49.3'
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -26,4 +26,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = '49.2'\n+PROVISION_VERSION = '49.3'\n", "issue": "Upgrade pip from 19.1.1 and pip-tools from 3.8.0\nFollowup issue from #13067. pip-tools 3.9.0 or 4.0.0 fails to resolve dependencies from Git URLs (jazzband/pip-tools#851):\r\n\r\n`pip._internal.exceptions.DistributionNotFound: No matching distribution found for zulip==0.6.1_git (from -r requirements/common.in (line 135))`\r\n\r\nwhile pip 19.2 breaks pip-tools 3.8.0 (jazzband/pip-tools#853):\r\n\r\n`TypeError: __init__() got an unexpected keyword argument 'find_links'`\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.2'\n", "path": "version.py"}]}
1,034
79
gh_patches_debug_17290
rasdani/github-patches
git_diff
joke2k__faker-919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Brazilian RG (identity card) Add Generator to Brazilian RG (identity card) ### Steps to reproduce fake = Faker('pt_Br') fake.rg() ### Expected behavior return like this rules: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html 8 digits + 1 checksum digit ### Actual behavior New feature </issue> <code> [start of faker/providers/ssn/pt_BR/__init__.py] 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 from .. import Provider as SsnProvider 5 6 7 def checksum(digits): 8 """ 9 Returns the checksum of CPF digits. 10 References to the algorithm: 11 https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo 12 https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm 13 """ 14 s = 0 15 p = len(digits) + 1 16 for i in range(0, len(digits)): 17 s += digits[i] * p 18 p -= 1 19 20 reminder = s % 11 21 if reminder == 0 or reminder == 1: 22 return 0 23 else: 24 return 11 - reminder 25 26 27 class Provider(SsnProvider): 28 """ 29 Provider for Brazilian SSN also known in Brazil as CPF. 30 There are two methods Provider.ssn and Provider.cpf 31 The snn returns a valid number with numbers only 32 The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn 33 """ 34 35 def ssn(self): 36 digits = self.generator.random.sample(range(10), 9) 37 38 dv = checksum(digits) 39 digits.append(dv) 40 digits.append(checksum(digits)) 41 42 return ''.join(map(str, digits)) 43 44 def cpf(self): 45 c = self.ssn() 46 return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:] 47 [end of faker/providers/ssn/pt_BR/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py --- a/faker/providers/ssn/pt_BR/__init__.py +++ b/faker/providers/ssn/pt_BR/__init__.py @@ -1,6 +1,7 @@ # coding=utf-8 from __future__ import unicode_literals + from .. import Provider as SsnProvider @@ -44,3 +45,22 @@ def cpf(self): c = self.ssn() return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:] + + def rg(self): + """ + Brazilian RG, return plain numbers. + Check: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html + """ + + digits = self.generator.random.sample(range(0, 9), 8) + checksum = sum(i * digits[i - 2] for i in range(2, 10)) + last_digit = 11 - (checksum % 11) + + if last_digit == 10: + digits.append('X') + elif last_digit == 11: + digits.append(0) + else: + digits.append(last_digit) + + return ''.join(map(str, digits))
{"golden_diff": "diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py\n--- a/faker/providers/ssn/pt_BR/__init__.py\n+++ b/faker/providers/ssn/pt_BR/__init__.py\n@@ -1,6 +1,7 @@\n # coding=utf-8\n \n from __future__ import unicode_literals\n+\n from .. import Provider as SsnProvider\n \n \n@@ -44,3 +45,22 @@\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n+\n+ def rg(self):\n+ \"\"\"\n+ Brazilian RG, return plain numbers.\n+ Check: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html\n+ \"\"\"\n+\n+ digits = self.generator.random.sample(range(0, 9), 8)\n+ checksum = sum(i * digits[i - 2] for i in range(2, 10))\n+ last_digit = 11 - (checksum % 11)\n+\n+ if last_digit == 10:\n+ digits.append('X')\n+ elif last_digit == 11:\n+ digits.append(0)\n+ else:\n+ digits.append(last_digit)\n+\n+ return ''.join(map(str, digits))\n", "issue": "Brazilian RG (identity card)\nAdd Generator to Brazilian RG (identity card)\r\n\r\n### Steps to reproduce\r\nfake = Faker('pt_Br')\r\nfake.rg()\r\n\r\n### Expected behavior\r\nreturn like this rules:\r\nhttps://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html\r\n8 digits + 1 checksum digit\r\n### Actual behavior\r\nNew feature\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as SsnProvider\n\n\ndef checksum(digits):\n \"\"\"\n Returns the checksum of CPF digits.\n References to the algorithm:\n https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo\n https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm\n \"\"\"\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n s += digits[i] * p\n p -= 1\n\n reminder = s % 11\n if reminder == 0 or reminder == 1:\n return 0\n else:\n return 11 - reminder\n\n\nclass Provider(SsnProvider):\n \"\"\"\n Provider for Brazilian SSN also known in Brazil as CPF.\n There are two methods Provider.ssn and Provider.cpf\n The snn returns a valid number with numbers only\n The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn\n \"\"\"\n\n def ssn(self):\n digits = self.generator.random.sample(range(10), 9)\n\n dv = checksum(digits)\n digits.append(dv)\n digits.append(checksum(digits))\n\n return ''.join(map(str, digits))\n\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n", "path": "faker/providers/ssn/pt_BR/__init__.py"}]}
1,080
325
gh_patches_debug_34130
rasdani/github-patches
git_diff
azavea__raster-vision-1560
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve Dataset.from_uris methods When using the `from_uris` methods (such as in `SemanticSegmentationSlidingWindowGeoDataset`), it's easy to forget to pass in an important argument due to the use of kwargs. For example, size and stride are needed, and `label_vector_default_class_id` defaults to None which counterintuitively removes all the vectors. We should fix these and related problems. This issue was originally noted in https://github.com/azavea/raster-vision/pull/1476 </issue> <code> [start of rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py] 1 from typing import TYPE_CHECKING, Dict, Optional 2 from copy import deepcopy 3 4 from rastervision.core.data.vector_transformer import VectorTransformer 5 from rastervision.core.data.vector_transformer.label_maker.filter import ( 6 create_filter) 7 from rastervision.core.data.utils.geojson import features_to_geojson 8 9 if TYPE_CHECKING: 10 from rastervision.core.data import ClassConfig, CRSTransformer 11 12 13 class ClassInferenceTransformer(VectorTransformer): 14 """Infers missing class_ids from GeoJSON features. 15 16 Rules: 17 1) If class_id is in feature['properties'], use it. 18 2) If class_config is set and class_name or label are in 19 feature['properties'] and in class_config, use corresponding 20 class_id. 21 3) If class_id_to_filter is set and filter is true when applied to 22 feature, use corresponding class_id. 23 4) Otherwise, return the default_class_id 24 """ 25 26 def __init__(self, 27 default_class_id: Optional[int], 28 class_config: Optional['ClassConfig'] = None, 29 class_id_to_filter: Optional[Dict[int, list]] = None): 30 self.class_config = class_config 31 self.class_id_to_filter = class_id_to_filter 32 self.default_class_id = default_class_id 33 34 if self.class_id_to_filter is not None: 35 self.class_id_to_filter = {} 36 for class_id, filter_exp in class_id_to_filter.items(): 37 self.class_id_to_filter[int(class_id)] = create_filter( 38 filter_exp) 39 40 @staticmethod 41 def infer_feature_class_id( 42 feature: dict, 43 default_class_id: Optional[int], 44 class_config: Optional['ClassConfig'] = None, 45 class_id_to_filter: Optional[Dict[int, list]] = None 46 ) -> Optional[int]: 47 """Infer the class_id for a GeoJSON feature. 48 49 Rules: 50 1) If class_id is in feature['properties'], use it. 51 2) If class_config is set and class_name or label are in 52 feature['properties'] and in class_config, use corresponding 53 class_id. 54 3) If class_id_to_filter is set and filter is true when applied to 55 feature, use corresponding class_id. 56 4) Otherwise, return the default_class_id. 57 58 Args: 59 feature (dict): GeoJSON feature. 60 61 Returns: 62 Optional[int]: Inferred class ID. 
63 """ 64 class_id = feature.get('properties', {}).get('class_id') 65 if class_id is not None: 66 return class_id 67 68 if class_config is not None: 69 class_name = feature.get('properties', {}).get('class_name') 70 if class_name in class_config.names: 71 return class_config.names.index(class_name) 72 73 label = feature.get('properties', {}).get('label') 74 if label in class_config.names: 75 return class_config.names.index(label) 76 77 if class_id_to_filter is not None: 78 for class_id, filter_fn in class_id_to_filter.items(): 79 if filter_fn(feature): 80 return class_id 81 82 return default_class_id 83 84 def transform(self, 85 geojson: dict, 86 crs_transformer: Optional['CRSTransformer'] = None) -> dict: 87 """Add class_id to feature properties and drop features with no class. 88 89 For each feature in geojson, the class_id is inferred and is set into 90 feature['properties']. If the class_id is None (because none of the 91 rules apply and the default_class_id is None), the feature is dropped. 92 """ 93 new_features = [] 94 for feature in geojson['features']: 95 class_id = self.infer_feature_class_id( 96 feature, 97 default_class_id=self.default_class_id, 98 class_config=self.class_config, 99 class_id_to_filter=self.class_id_to_filter) 100 if class_id is not None: 101 feature = deepcopy(feature) 102 properties = feature.get('properties', {}) 103 properties['class_id'] = class_id 104 feature['properties'] = properties 105 new_features.append(feature) 106 new_geojson = features_to_geojson(new_features) 107 return new_geojson 108 [end of rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py --- a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py +++ b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py @@ -1,5 +1,6 @@ from typing import TYPE_CHECKING, Dict, Optional from copy import deepcopy +import logging from rastervision.core.data.vector_transformer import VectorTransformer from rastervision.core.data.vector_transformer.label_maker.filter import ( @@ -9,6 +10,8 @@ if TYPE_CHECKING: from rastervision.core.data import ClassConfig, CRSTransformer +log = logging.getLogger(__name__) + class ClassInferenceTransformer(VectorTransformer): """Infers missing class_ids from GeoJSON features. @@ -91,6 +94,7 @@ rules apply and the default_class_id is None), the feature is dropped. """ new_features = [] + warned = False for feature in geojson['features']: class_id = self.infer_feature_class_id( feature, @@ -103,5 +107,13 @@ properties['class_id'] = class_id feature['properties'] = properties new_features.append(feature) + elif not warned: + log.warning( + 'ClassInferenceTransformer is dropping vector features because ' + 'class_id cannot be inferred. To avoid this behavior, ' + 'set default_class_id to a non-None value in ' + 'ClassInferenceTransformer.') + warned = True + new_geojson = features_to_geojson(new_features) return new_geojson
{"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n--- a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n+++ b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n@@ -1,5 +1,6 @@\n from typing import TYPE_CHECKING, Dict, Optional\n from copy import deepcopy\n+import logging\n \n from rastervision.core.data.vector_transformer import VectorTransformer\n from rastervision.core.data.vector_transformer.label_maker.filter import (\n@@ -9,6 +10,8 @@\n if TYPE_CHECKING:\n from rastervision.core.data import ClassConfig, CRSTransformer\n \n+log = logging.getLogger(__name__)\n+\n \n class ClassInferenceTransformer(VectorTransformer):\n \"\"\"Infers missing class_ids from GeoJSON features.\n@@ -91,6 +94,7 @@\n rules apply and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n+ warned = False\n for feature in geojson['features']:\n class_id = self.infer_feature_class_id(\n feature,\n@@ -103,5 +107,13 @@\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n+ elif not warned:\n+ log.warning(\n+ 'ClassInferenceTransformer is dropping vector features because '\n+ 'class_id cannot be inferred. To avoid this behavior, '\n+ 'set default_class_id to a non-None value in '\n+ 'ClassInferenceTransformer.')\n+ warned = True\n+\n new_geojson = features_to_geojson(new_features)\n return new_geojson\n", "issue": "Improve Dataset.from_uris methods\nWhen using the `from_uris` methods (such as in `SemanticSegmentationSlidingWindowGeoDataset`), it's easy to forget to pass in an important argument due to the use of kwargs. For example, size and stride are needed, and `label_vector_default_class_id` defaults to None which counterintuitively removes all the vectors. 
We should fix these and related problems.\r\n\r\nThis issue was originally noted in https://github.com/azavea/raster-vision/pull/1476\r\n\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Dict, Optional\nfrom copy import deepcopy\n\nfrom rastervision.core.data.vector_transformer import VectorTransformer\nfrom rastervision.core.data.vector_transformer.label_maker.filter import (\n create_filter)\nfrom rastervision.core.data.utils.geojson import features_to_geojson\n\nif TYPE_CHECKING:\n from rastervision.core.data import ClassConfig, CRSTransformer\n\n\nclass ClassInferenceTransformer(VectorTransformer):\n \"\"\"Infers missing class_ids from GeoJSON features.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id\n \"\"\"\n\n def __init__(self,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None):\n self.class_config = class_config\n self.class_id_to_filter = class_id_to_filter\n self.default_class_id = default_class_id\n\n if self.class_id_to_filter is not None:\n self.class_id_to_filter = {}\n for class_id, filter_exp in class_id_to_filter.items():\n self.class_id_to_filter[int(class_id)] = create_filter(\n filter_exp)\n\n @staticmethod\n def infer_feature_class_id(\n feature: dict,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None\n ) -> Optional[int]:\n \"\"\"Infer the class_id for a GeoJSON feature.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id.\n\n Args:\n feature (dict): GeoJSON feature.\n\n Returns:\n Optional[int]: Inferred class ID.\n \"\"\"\n class_id = feature.get('properties', {}).get('class_id')\n if class_id is not None:\n return class_id\n\n if class_config is not None:\n class_name = feature.get('properties', {}).get('class_name')\n if class_name in class_config.names:\n return class_config.names.index(class_name)\n\n label = feature.get('properties', {}).get('label')\n if label in class_config.names:\n return class_config.names.index(label)\n\n if class_id_to_filter is not None:\n for class_id, filter_fn in class_id_to_filter.items():\n if filter_fn(feature):\n return class_id\n\n return default_class_id\n\n def transform(self,\n geojson: dict,\n crs_transformer: Optional['CRSTransformer'] = None) -> dict:\n \"\"\"Add class_id to feature properties and drop features with no class.\n\n For each feature in geojson, the class_id is inferred and is set into\n feature['properties']. 
If the class_id is None (because none of the\n rules apply and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n for feature in geojson['features']:\n class_id = self.infer_feature_class_id(\n feature,\n default_class_id=self.default_class_id,\n class_config=self.class_config,\n class_id_to_filter=self.class_id_to_filter)\n if class_id is not None:\n feature = deepcopy(feature)\n properties = feature.get('properties', {})\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n new_geojson = features_to_geojson(new_features)\n return new_geojson\n", "path": "rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py"}]}
1,786
405
gh_patches_debug_1246
rasdani/github-patches
git_diff
getsentry__sentry-15491
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Simple typo in the compact docstring for utils.functional ## Important Details How are you running Sentry? * [ ] On-Premise docker [Version xyz] * [ ] Saas (sentry.io) * [x] Other [briefly describe your environment] Observed documentation - not running sentry. ## Description Simple typo should be values rather than valules. ## Steps to Reproduce 1. Observe docstring in utils.functional.compact method ### What you expected to happen Should be values rather than valules. ### Possible Solution Replace valules with values. </issue> <code> [start of src/sentry/utils/functional.py] 1 from __future__ import absolute_import 2 3 import six 4 5 from django.utils.functional import empty 6 7 8 def extract_lazy_object(lo): 9 """ 10 Unwrap a LazyObject and return the inner object. Whatever that may be. 11 12 ProTip: This is relying on `django.utils.functional.empty`, which may 13 or may not be removed in the future. It's 100% undocumented. 14 """ 15 if not hasattr(lo, "_wrapped"): 16 return lo 17 if lo._wrapped is empty: 18 lo._setup() 19 return lo._wrapped 20 21 22 def apply_values(function, mapping): 23 """\ 24 Applies ``function`` to a sequence containing all of the values in the 25 provided mapping, returing a new mapping with the values replaced with 26 the results of the provided function. 27 28 >>> apply_values( 29 ... lambda values: map(u'{} fish'.format, values), 30 ... {1: 'red', 2: 'blue'}, 31 ... ) 32 {1: u'red fish', 2: u'blue fish'} 33 """ 34 if not mapping: 35 return {} 36 37 keys, values = zip(*mapping.items()) 38 return dict(zip(keys, function(values))) 39 40 41 def compact(seq): 42 """ 43 Removes ``None`` values from various sequence-based data structures. 44 45 dict: 46 Removes keys with a corresponding ``None`` value. 47 48 list: 49 Removes ``None`` valules. 50 51 >>> compact({'foo': 'bar', 'baz': None}) 52 {'foo': 'bar'} 53 54 >>> compact([1, None, 2]) 55 [1, 2] 56 """ 57 if isinstance(seq, dict): 58 return {k: v for k, v in six.iteritems(seq) if v is not None} 59 60 elif isinstance(seq, list): 61 return [k for k in seq if k is not None] 62 [end of src/sentry/utils/functional.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/utils/functional.py b/src/sentry/utils/functional.py --- a/src/sentry/utils/functional.py +++ b/src/sentry/utils/functional.py @@ -46,7 +46,7 @@ Removes keys with a corresponding ``None`` value. list: - Removes ``None`` valules. + Removes ``None`` values. >>> compact({'foo': 'bar', 'baz': None}) {'foo': 'bar'}
{"golden_diff": "diff --git a/src/sentry/utils/functional.py b/src/sentry/utils/functional.py\n--- a/src/sentry/utils/functional.py\n+++ b/src/sentry/utils/functional.py\n@@ -46,7 +46,7 @@\n Removes keys with a corresponding ``None`` value.\n \n list:\n- Removes ``None`` valules.\n+ Removes ``None`` values.\n \n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n", "issue": "Simple typo in the compact docstring for utils.functional\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [ ] Saas (sentry.io)\r\n* [x] Other [briefly describe your environment]\r\nObserved documentation - not running sentry.\r\n\r\n## Description\r\n\r\nSimple typo should be values rather than valules.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Observe docstring in utils.functional.compact method\r\n\r\n### What you expected to happen\r\n\r\nShould be values rather than valules.\r\n\r\n### Possible Solution\r\n\r\nReplace valules with values.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django.utils.functional import empty\n\n\ndef extract_lazy_object(lo):\n \"\"\"\n Unwrap a LazyObject and return the inner object. Whatever that may be.\n\n ProTip: This is relying on `django.utils.functional.empty`, which may\n or may not be removed in the future. It's 100% undocumented.\n \"\"\"\n if not hasattr(lo, \"_wrapped\"):\n return lo\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped\n\n\ndef apply_values(function, mapping):\n \"\"\"\\\n Applies ``function`` to a sequence containing all of the values in the\n provided mapping, returing a new mapping with the values replaced with\n the results of the provided function.\n\n >>> apply_values(\n ... lambda values: map(u'{} fish'.format, values),\n ... {1: 'red', 2: 'blue'},\n ... )\n {1: u'red fish', 2: u'blue fish'}\n \"\"\"\n if not mapping:\n return {}\n\n keys, values = zip(*mapping.items())\n return dict(zip(keys, function(values)))\n\n\ndef compact(seq):\n \"\"\"\n Removes ``None`` values from various sequence-based data structures.\n\n dict:\n Removes keys with a corresponding ``None`` value.\n\n list:\n Removes ``None`` valules.\n\n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n\n >>> compact([1, None, 2])\n [1, 2]\n \"\"\"\n if isinstance(seq, dict):\n return {k: v for k, v in six.iteritems(seq) if v is not None}\n\n elif isinstance(seq, list):\n return [k for k in seq if k is not None]\n", "path": "src/sentry/utils/functional.py"}]}
1,176
107
gh_patches_debug_65703
rasdani/github-patches
git_diff
carpentries__amy-1793
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: assignment form queryset may return duplicate results
Introduced in v2.16, AssignmentForm contains a queryset that may yield duplicate results due to the filtering used.
</issue>
<code>
[start of amy/dashboard/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3 from django.db.models import Q
4 from django_countries.fields import CountryField
5 
6 from workshops.models import (
7     Language,
8     GenderMixin,
9     Person,
10     TrainingProgress,
11     TrainingRequirement,
12 )
13 
14 from workshops.forms import BootstrapHelper
15 # this is used instead of Django Autocomplete Light widgets
16 # see issue #1330: https://github.com/swcarpentry/amy/issues/1330
17 from workshops.fields import (
18     Select2Widget,
19     ModelSelect2MultipleWidget,
20     RadioSelectWithOther,
21 )
22 
23 
24 class AssignmentForm(forms.Form):
25     assigned_to = forms.ModelChoiceField(
26         label="Assigned to:",
27         required=False,
28         queryset=Person.objects.filter(
29             Q(is_superuser=True) | Q(groups__name="administrators")
30         ),
31         widget=Select2Widget(),
32     )
33     helper = BootstrapHelper(
34         add_submit_button=False,
35         add_cancel_button=False,
36         wider_labels=True,
37         use_get_method=True,
38         form_id="assignment-form"
39     )
40 
41 
42 class AutoUpdateProfileForm(forms.ModelForm):
43     username = forms.CharField(disabled=True, required=False)
44     email = forms.CharField(
45         disabled=True, required=False,
46         label=Person._meta.get_field('email').verbose_name,
47         help_text=Person._meta.get_field('email').help_text,
48     )
49     github = forms.CharField(
50         disabled=True, required=False,
51         help_text='If you want to change your github username, please email '
52                   'us at <a href="mailto:[email protected]">'
53                   '[email protected]</a>.')
54 
55     country = CountryField().formfield(
56         required=False,
57         help_text='Your country of residence.',
58         widget=Select2Widget,
59     )
60 
61     languages = forms.ModelMultipleChoiceField(
62         label='Languages',
63         required=False,
64         queryset=Language.objects.all(),
65         widget=ModelSelect2MultipleWidget(data_view='language-lookup')
66     )
67 
68     helper = BootstrapHelper(add_cancel_button=False)
69 
70     class Meta:
71         model = Person
72         fields = [
73             'personal',
74             'middle',
75             'family',
76             'email',
77             'secondary_email',
78             'gender',
79             'gender_other',
80             'may_contact',
81             'publish_profile',
82             'lesson_publication_consent',
83             'country',
84             'airport',
85             'github',
86             'twitter',
87             'url',
88             'username',
89             'affiliation',
90             'domains',
91             'lessons',
92             'languages',
93             'occupation',
94             'orcid',
95         ]
96         readonly_fields = (
97             'username',
98             'github',
99         )
100         widgets = {
101             'gender': RadioSelectWithOther('gender_other'),
102             'domains': forms.CheckboxSelectMultiple(),
103             'lessons': forms.CheckboxSelectMultiple(),
104             'airport': Select2Widget,
105         }
106 
107     def __init__(self, *args, **kwargs):
108         super().__init__(*args, **kwargs)
109 
110         # set up a layout object for the helper
111         self.helper.layout = self.helper.build_default_layout(self)
112 
113         # set up `*WithOther` widgets so that they can display additional
114         # fields inline
115         self['gender'].field.widget.other_field = self['gender_other']
116 
117         # remove additional fields
118         self.helper.layout.fields.remove('gender_other')
119 
120     def clean(self):
121         super().clean()
122         errors = dict()
123 
124         # 1: require "other gender" field if "other" was selected in
125         # "gender" field
126         gender = self.cleaned_data.get('gender', '')
127         gender_other = self.cleaned_data.get('gender_other', '')
128         if gender == GenderMixin.OTHER and not gender_other:
129             errors['gender'] = ValidationError("This field is required.")
130         elif gender != GenderMixin.OTHER and gender_other:
131             errors['gender'] = ValidationError(
132                 'If you entered data in "Other" field, please select that '
133                 "option.")
134 
135         # raise errors if any present
136         if errors:
137             raise ValidationError(errors)
138 
139 
140 class SendHomeworkForm(forms.ModelForm):
141     url = forms.URLField(label='URL')
142     requirement = forms.ModelChoiceField(
143         queryset=TrainingRequirement.objects.filter(name__endswith="Homework"),
144         label="Type", required=True,
145     )
146 
147     helper = BootstrapHelper(add_cancel_button=False)
148 
149     class Meta:
150         model = TrainingProgress
151         fields = [
152             'requirement',
153             'url',
154         ]
155 
156 
157 class SearchForm(forms.Form):
158     """Represent general searching form."""
159 
160     term = forms.CharField(label="Term", max_length=100)
161     no_redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
162     helper = BootstrapHelper(add_cancel_button=False, use_get_method=True)
[end of amy/dashboard/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/amy/dashboard/forms.py b/amy/dashboard/forms.py --- a/amy/dashboard/forms.py +++ b/amy/dashboard/forms.py @@ -27,7 +27,7 @@ required=False, queryset=Person.objects.filter( Q(is_superuser=True) | Q(groups__name="administrators") - ), + ).distinct(), widget=Select2Widget(), ) helper = BootstrapHelper(
{"golden_diff": "diff --git a/amy/dashboard/forms.py b/amy/dashboard/forms.py\n--- a/amy/dashboard/forms.py\n+++ b/amy/dashboard/forms.py\n@@ -27,7 +27,7 @@\n required=False,\n queryset=Person.objects.filter(\n Q(is_superuser=True) | Q(groups__name=\"administrators\")\n- ),\n+ ).distinct(),\n widget=Select2Widget(),\n )\n helper = BootstrapHelper(\n", "issue": "Bug: assignment form queryset may return duplicate results\nIntroduced in v2.16, AssignmentForm contains a queryset that may yield duplicate results due to the filtering used.\n", "before_files": [{"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django_countries.fields import CountryField\n\nfrom workshops.models import (\n Language,\n GenderMixin,\n Person,\n TrainingProgress,\n TrainingRequirement,\n)\n\nfrom workshops.forms import BootstrapHelper\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n Select2Widget,\n ModelSelect2MultipleWidget,\n RadioSelectWithOther,\n)\n\n\nclass AssignmentForm(forms.Form):\n assigned_to = forms.ModelChoiceField(\n label=\"Assigned to:\",\n required=False,\n queryset=Person.objects.filter(\n Q(is_superuser=True) | Q(groups__name=\"administrators\")\n ),\n widget=Select2Widget(),\n )\n helper = BootstrapHelper(\n add_submit_button=False,\n add_cancel_button=False,\n wider_labels=True,\n use_get_method=True,\n form_id=\"assignment-form\"\n )\n\n\nclass AutoUpdateProfileForm(forms.ModelForm):\n username = forms.CharField(disabled=True, required=False)\n email = forms.CharField(\n disabled=True, required=False,\n label=Person._meta.get_field('email').verbose_name,\n help_text=Person._meta.get_field('email').help_text,\n )\n github = forms.CharField(\n disabled=True, required=False,\n help_text='If you want to change your github username, please email '\n 'us at <a href=\"mailto:[email protected]\">'\n '[email protected]</a>.')\n\n country = CountryField().formfield(\n required=False,\n help_text='Your country of residence.',\n widget=Select2Widget,\n )\n\n languages = forms.ModelMultipleChoiceField(\n label='Languages',\n required=False,\n queryset=Language.objects.all(),\n widget=ModelSelect2MultipleWidget(data_view='language-lookup')\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = Person\n fields = [\n 'personal',\n 'middle',\n 'family',\n 'email',\n 'secondary_email',\n 'gender',\n 'gender_other',\n 'may_contact',\n 'publish_profile',\n 'lesson_publication_consent',\n 'country',\n 'airport',\n 'github',\n 'twitter',\n 'url',\n 'username',\n 'affiliation',\n 'domains',\n 'lessons',\n 'languages',\n 'occupation',\n 'orcid',\n ]\n readonly_fields = (\n 'username',\n 'github',\n )\n widgets = {\n 'gender': RadioSelectWithOther('gender_other'),\n 'domains': forms.CheckboxSelectMultiple(),\n 'lessons': forms.CheckboxSelectMultiple(),\n 'airport': Select2Widget,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up a layout object for the helper\n self.helper.layout = self.helper.build_default_layout(self)\n\n # set up `*WithOther` widgets so that they can display additional\n # fields inline\n self['gender'].field.widget.other_field = self['gender_other']\n\n # remove additional fields\n self.helper.layout.fields.remove('gender_other')\n\n def clean(self):\n super().clean()\n errors = dict()\n\n # 1: require \"other gender\" field if \"other\" 
was selected in\n # \"gender\" field\n gender = self.cleaned_data.get('gender', '')\n gender_other = self.cleaned_data.get('gender_other', '')\n if gender == GenderMixin.OTHER and not gender_other:\n errors['gender'] = ValidationError(\"This field is required.\")\n elif gender != GenderMixin.OTHER and gender_other:\n errors['gender'] = ValidationError(\n 'If you entered data in \"Other\" field, please select that '\n \"option.\")\n\n # raise errors if any present\n if errors:\n raise ValidationError(errors)\n\n\nclass SendHomeworkForm(forms.ModelForm):\n url = forms.URLField(label='URL')\n requirement = forms.ModelChoiceField(\n queryset=TrainingRequirement.objects.filter(name__endswith=\"Homework\"),\n label=\"Type\", required=True,\n )\n\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = TrainingProgress\n fields = [\n 'requirement',\n 'url',\n ]\n\n\nclass SearchForm(forms.Form):\n \"\"\"Represent general searching form.\"\"\"\n\n term = forms.CharField(label=\"Term\", max_length=100)\n no_redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)\n helper = BootstrapHelper(add_cancel_button=False, use_get_method=True)\n", "path": "amy/dashboard/forms.py"}]}
1,962
94
gh_patches_debug_201
rasdani/github-patches
git_diff
blaze__blaze-475
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make blaze.test() return True or False @asmeurer suggests this. Currently we're passing through pytest.main() which is like the error code from command line programs. <!--- @huboard:{"order":398.859375,"milestone_order":452,"custom_state":""} --> </issue> <code> [start of blaze/__init__.py] 1 from __future__ import absolute_import, division, print_function 2 3 import logging 4 5 from dynd import nd 6 from pandas import DataFrame 7 import h5py 8 9 from multipledispatch import halt_ordering, restart_ordering 10 11 halt_ordering() # Turn off multipledispatch ordering 12 13 from .expr import * 14 from .expr.functions import * 15 from .api import * 16 from .data.csv import * 17 from .data.json import * 18 from .data.hdf5 import * 19 from .compute.python import * 20 from .data.meta import * 21 from .compute.pandas import * 22 from .compute.numpy import * 23 from .compute.core import * 24 from .compute.core import compute 25 from .sql import * 26 27 try: 28 from .spark import * 29 except ImportError: 30 pass 31 try: 32 from .compute.pytables import * 33 except ImportError: 34 pass 35 try: 36 from .compute.chunks import * 37 except ImportError: 38 pass 39 try: 40 from .bcolz import * 41 except ImportError: 42 pass 43 try: 44 from .mongo import * 45 except ImportError: 46 pass 47 48 restart_ordering() # Restart multipledispatch ordering and do ordering 49 50 logging.basicConfig() 51 logger = logging.getLogger(__name__) 52 logger.setLevel(logging.WARNING) 53 54 55 inf = float('inf') 56 nan = float('nan') 57 58 __version__ = '0.6.1' 59 60 # If IPython is already loaded, register the Blaze catalog magic 61 # from . import catalog 62 # import sys 63 # if 'IPython' in sys.modules: 64 # catalog.register_ipy_magic() 65 # del sys 66 67 def print_versions(): 68 """Print all the versions of software that Blaze relies on.""" 69 import sys, platform 70 import numpy as np 71 import dynd 72 import datashape 73 print("-=" * 38) 74 print("Blaze version: %s" % __version__) 75 print("Datashape version: %s" % datashape.__version__) 76 print("NumPy version: %s" % np.__version__) 77 print("DyND version: %s / LibDyND %s" % 78 (dynd.__version__, dynd.__libdynd_version__)) 79 print("Python version: %s" % sys.version) 80 (sysname, nodename, release, version, machine, processor) = \ 81 platform.uname() 82 print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version)) 83 if sysname == "Linux": 84 print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1])) 85 if not processor: 86 processor = "not recognized" 87 print("Processor: %s" % processor) 88 print("Byte-ordering: %s" % sys.byteorder) 89 print("-=" * 38) 90 91 92 def test(verbose=False, junitfile=None, exit=False): 93 """ 94 Runs the full Blaze test suite, outputting 95 the results of the tests to sys.stdout. 96 97 This uses py.test to discover which tests to 98 run, and runs tests in any 'tests' subdirectory 99 within the Blaze module. 100 101 Parameters 102 ---------- 103 verbose : int, optional 104 Value 0 prints very little, 1 prints a little bit, 105 and 2 prints the test names while testing. 106 junitfile : string, optional 107 If provided, writes the test results to an junit xml 108 style xml file. This is useful for running the tests 109 in a CI server such as Jenkins. 110 exit : bool, optional 111 If True, the function will call sys.exit with an 112 error code after the tests are finished. 
113 """ 114 import os 115 import sys 116 import pytest 117 118 args = [] 119 120 if verbose: 121 args.append('--verbose') 122 123 # Output an xunit file if requested 124 if junitfile is not None: 125 args.append('--junit-xml=%s' % junitfile) 126 127 # Add all 'tests' subdirectories to the options 128 rootdir = os.path.dirname(__file__) 129 for root, dirs, files in os.walk(rootdir): 130 if 'tests' in dirs: 131 testsdir = os.path.join(root, 'tests') 132 args.append(testsdir) 133 print('Test dir: %s' % testsdir[len(rootdir) + 1:]) 134 # print versions (handy when reporting problems) 135 print_versions() 136 sys.stdout.flush() 137 138 # Ask pytest to do its thing 139 error_code = pytest.main(args=args) 140 if exit: 141 return sys.exit(error_code) 142 return error_code 143 [end of blaze/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/blaze/__init__.py b/blaze/__init__.py --- a/blaze/__init__.py +++ b/blaze/__init__.py @@ -139,4 +139,4 @@ error_code = pytest.main(args=args) if exit: return sys.exit(error_code) - return error_code + return error_code == 0
{"golden_diff": "diff --git a/blaze/__init__.py b/blaze/__init__.py\n--- a/blaze/__init__.py\n+++ b/blaze/__init__.py\n@@ -139,4 +139,4 @@\n error_code = pytest.main(args=args)\n if exit:\n return sys.exit(error_code)\n- return error_code\n+ return error_code == 0\n", "issue": "Make blaze.test() return True or False\n@asmeurer suggests this. Currently we're passing through pytest.main() which is like the error code from command line programs.\n\n<!---\n@huboard:{\"order\":398.859375,\"milestone_order\":452,\"custom_state\":\"\"}\n-->\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\n\nfrom dynd import nd\nfrom pandas import DataFrame\nimport h5py\n\nfrom multipledispatch import halt_ordering, restart_ordering\n\nhalt_ordering() # Turn off multipledispatch ordering\n\nfrom .expr import *\nfrom .expr.functions import *\nfrom .api import *\nfrom .data.csv import *\nfrom .data.json import *\nfrom .data.hdf5 import *\nfrom .compute.python import *\nfrom .data.meta import *\nfrom .compute.pandas import *\nfrom .compute.numpy import *\nfrom .compute.core import *\nfrom .compute.core import compute\nfrom .sql import *\n\ntry:\n from .spark import *\nexcept ImportError:\n pass\ntry:\n from .compute.pytables import *\nexcept ImportError:\n pass\ntry:\n from .compute.chunks import *\nexcept ImportError:\n pass\ntry:\n from .bcolz import *\nexcept ImportError:\n pass\ntry:\n from .mongo import *\nexcept ImportError:\n pass\n\nrestart_ordering() # Restart multipledispatch ordering and do ordering\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\n\ninf = float('inf')\nnan = float('nan')\n\n__version__ = '0.6.1'\n\n# If IPython is already loaded, register the Blaze catalog magic\n# from . import catalog\n# import sys\n# if 'IPython' in sys.modules:\n# catalog.register_ipy_magic()\n# del sys\n\ndef print_versions():\n \"\"\"Print all the versions of software that Blaze relies on.\"\"\"\n import sys, platform\n import numpy as np\n import dynd\n import datashape\n print(\"-=\" * 38)\n print(\"Blaze version: %s\" % __version__)\n print(\"Datashape version: %s\" % datashape.__version__)\n print(\"NumPy version: %s\" % np.__version__)\n print(\"DyND version: %s / LibDyND %s\" %\n (dynd.__version__, dynd.__libdynd_version__))\n print(\"Python version: %s\" % sys.version)\n (sysname, nodename, release, version, machine, processor) = \\\n platform.uname()\n print(\"Platform: %s-%s-%s (%s)\" % (sysname, release, machine, version))\n if sysname == \"Linux\":\n print(\"Linux dist: %s\" % \" \".join(platform.linux_distribution()[:-1]))\n if not processor:\n processor = \"not recognized\"\n print(\"Processor: %s\" % processor)\n print(\"Byte-ordering: %s\" % sys.byteorder)\n print(\"-=\" * 38)\n\n\ndef test(verbose=False, junitfile=None, exit=False):\n \"\"\"\n Runs the full Blaze test suite, outputting\n the results of the tests to sys.stdout.\n\n This uses py.test to discover which tests to\n run, and runs tests in any 'tests' subdirectory\n within the Blaze module.\n\n Parameters\n ----------\n verbose : int, optional\n Value 0 prints very little, 1 prints a little bit,\n and 2 prints the test names while testing.\n junitfile : string, optional\n If provided, writes the test results to an junit xml\n style xml file. 
This is useful for running the tests\n in a CI server such as Jenkins.\n exit : bool, optional\n If True, the function will call sys.exit with an\n error code after the tests are finished.\n \"\"\"\n import os\n import sys\n import pytest\n\n args = []\n\n if verbose:\n args.append('--verbose')\n\n # Output an xunit file if requested\n if junitfile is not None:\n args.append('--junit-xml=%s' % junitfile)\n\n # Add all 'tests' subdirectories to the options\n rootdir = os.path.dirname(__file__)\n for root, dirs, files in os.walk(rootdir):\n if 'tests' in dirs:\n testsdir = os.path.join(root, 'tests')\n args.append(testsdir)\n print('Test dir: %s' % testsdir[len(rootdir) + 1:])\n # print versions (handy when reporting problems)\n print_versions()\n sys.stdout.flush()\n\n # Ask pytest to do its thing\n error_code = pytest.main(args=args)\n if exit:\n return sys.exit(error_code)\n return error_code\n", "path": "blaze/__init__.py"}]}
1,902
86
gh_patches_debug_30662
rasdani/github-patches
git_diff
goauthentik__authentik-4428
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System Tasks: Show start timestamp and calculate Duration
**Is your feature request related to a problem? Please describe.**
For debugging purposes, I need the info when a task started, and when it finished. 

**Describe the solution you'd like**
I have seen that the TaskInfo object actually holds that information, but it is not returned by the API, and not shown in the "SystemTasks" table of the web UI. 
It would also make sense to calculate the duration for easier debugging.

**Describe alternatives you've considered**
I could look this up in the database, but this would be questionable UX, since there is already a view in the web app which should show this information.

**Additional context**
(none)
</issue>
<code>
[start of authentik/admin/api/tasks.py]
1 """Tasks API"""
2 from importlib import import_module
3 
4 from django.contrib import messages
5 from django.http.response import Http404
6 from django.utils.translation import gettext_lazy as _
7 from drf_spectacular.types import OpenApiTypes
8 from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema
9 from rest_framework.decorators import action
10 from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField
11 from rest_framework.permissions import IsAdminUser
12 from rest_framework.request import Request
13 from rest_framework.response import Response
14 from rest_framework.viewsets import ViewSet
15 from structlog.stdlib import get_logger
16 
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.events.monitored_tasks import TaskInfo, TaskResultStatus
19 
20 LOGGER = get_logger()
21 
22 
23 class TaskSerializer(PassiveSerializer):
24     """Serialize TaskInfo and TaskResult"""
25 
26     task_name = CharField()
27     task_description = CharField()
28     task_finish_timestamp = DateTimeField(source="finish_time")
29 
30     status = ChoiceField(
31         source="result.status.name",
32         choices=[(x.name, x.name) for x in TaskResultStatus],
33     )
34     messages = ListField(source="result.messages")
35 
36     def to_representation(self, instance):
37         """When a new version of authentik adds fields to TaskInfo,
38         the API will fail with an AttributeError, as the classes
39         are pickled in cache. In that case, just delete the info"""
40         try:
41             return super().to_representation(instance)
42         except AttributeError:  # pragma: no cover
43             if isinstance(self.instance, list):
44                 for inst in self.instance:
45                     inst.delete()
46             else:
47                 self.instance.delete()
48             return {}
49 
50 
51 class TaskViewSet(ViewSet):
52     """Read-only view set that returns all background tasks"""
53 
54     permission_classes = [IsAdminUser]
55     serializer_class = TaskSerializer
56 
57     @extend_schema(
58         responses={
59             200: TaskSerializer(many=False),
60             404: OpenApiResponse(description="Task not found"),
61         },
62         parameters=[
63             OpenApiParameter(
64                 "id",
65                 type=OpenApiTypes.STR,
66                 location=OpenApiParameter.PATH,
67                 required=True,
68             ),
69         ],
70     )
71     # pylint: disable=invalid-name
72     def retrieve(self, request: Request, pk=None) -> Response:
73         """Get a single system task"""
74         task = TaskInfo.by_name(pk)
75         if not task:
76             raise Http404
77         return Response(TaskSerializer(task, many=False).data)
78 
79     @extend_schema(responses={200: TaskSerializer(many=True)})
80     def list(self, request: Request) -> Response:
81         """List system tasks"""
82         tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)
83         return Response(TaskSerializer(tasks, many=True).data)
84 
85     @extend_schema(
86         request=OpenApiTypes.NONE,
87         responses={
88             204: OpenApiResponse(description="Task retried successfully"),
89             404: OpenApiResponse(description="Task not found"),
90             500: OpenApiResponse(description="Failed to retry task"),
91         },
92         parameters=[
93             OpenApiParameter(
94                 "id",
95                 type=OpenApiTypes.STR,
96                 location=OpenApiParameter.PATH,
97                 required=True,
98             ),
99         ],
100     )
101     @action(detail=True, methods=["post"])
102     # pylint: disable=invalid-name
103     def retry(self, request: Request, pk=None) -> Response:
104         """Retry task"""
105         task = TaskInfo.by_name(pk)
106         if not task:
107             raise Http404
108         try:
109             task_module = import_module(task.task_call_module)
110             task_func = getattr(task_module, task.task_call_func)
111             LOGGER.debug("Running task", task=task_func)
112             task_func.delay(*task.task_call_args, **task.task_call_kwargs)
113             messages.success(
114                 self.request,
115                 _("Successfully re-scheduled Task %(name)s!" % {"name": task.task_name}),
116             )
117             return Response(status=204)
118         except (ImportError, AttributeError):  # pragma: no cover
119             LOGGER.warning("Failed to run task, remove state", task=task)
120             # if we get an import error, the module path has probably changed
121             task.delete()
122             return Response(status=500)
123 [end of authentik/admin/api/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/admin/api/tasks.py b/authentik/admin/api/tasks.py --- a/authentik/admin/api/tasks.py +++ b/authentik/admin/api/tasks.py @@ -7,7 +7,13 @@ from drf_spectacular.types import OpenApiTypes from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema from rest_framework.decorators import action -from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField +from rest_framework.fields import ( + CharField, + ChoiceField, + DateTimeField, + ListField, + SerializerMethodField, +) from rest_framework.permissions import IsAdminUser from rest_framework.request import Request from rest_framework.response import Response @@ -26,6 +32,7 @@ task_name = CharField() task_description = CharField() task_finish_timestamp = DateTimeField(source="finish_time") + task_duration = SerializerMethodField() status = ChoiceField( source="result.status.name", @@ -33,7 +40,11 @@ ) messages = ListField(source="result.messages") - def to_representation(self, instance): + def get_task_duration(self, instance: TaskInfo) -> int: + """Get the duration a task took to run""" + return max(instance.finish_timestamp - instance.start_timestamp, 0) + + def to_representation(self, instance: TaskInfo): """When a new version of authentik adds fields to TaskInfo, the API will fail with an AttributeError, as the classes are pickled in cache. In that case, just delete the info"""
{"golden_diff": "diff --git a/authentik/admin/api/tasks.py b/authentik/admin/api/tasks.py\n--- a/authentik/admin/api/tasks.py\n+++ b/authentik/admin/api/tasks.py\n@@ -7,7 +7,13 @@\n from drf_spectacular.types import OpenApiTypes\n from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\n from rest_framework.decorators import action\n-from rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField\n+from rest_framework.fields import (\n+ CharField,\n+ ChoiceField,\n+ DateTimeField,\n+ ListField,\n+ SerializerMethodField,\n+)\n from rest_framework.permissions import IsAdminUser\n from rest_framework.request import Request\n from rest_framework.response import Response\n@@ -26,6 +32,7 @@\n task_name = CharField()\n task_description = CharField()\n task_finish_timestamp = DateTimeField(source=\"finish_time\")\n+ task_duration = SerializerMethodField()\n \n status = ChoiceField(\n source=\"result.status.name\",\n@@ -33,7 +40,11 @@\n )\n messages = ListField(source=\"result.messages\")\n \n- def to_representation(self, instance):\n+ def get_task_duration(self, instance: TaskInfo) -> int:\n+ \"\"\"Get the duration a task took to run\"\"\"\n+ return max(instance.finish_timestamp - instance.start_timestamp, 0)\n+\n+ def to_representation(self, instance: TaskInfo):\n \"\"\"When a new version of authentik adds fields to TaskInfo,\n the API will fail with an AttributeError, as the classes\n are pickled in cache. In that case, just delete the info\"\"\"\n", "issue": "System Tasks: Show start timestamp and calculate Duration\n**Is your feature request related to a problem? Please describe.**\r\nFor debugging purposes, I need the info when a task started, and when it finished. \r\n\r\n**Describe the solution you'd like**\r\nI have seen that the TaskInfo object actually holds that information, but it is not returned by the API, and not shown in the \"SystemTasks\" table of the web UI. 
\r\nIt would also make sense to calculate the duration for easier debugging.\r\n\r\n**Describe alternatives you've considered**\r\nI could look this up in the database, but this would be questionable UX, since there is already a view in the web app which should show this information.\r\n\r\n**Additional context**\r\n(none)\r\n\n", "before_files": [{"content": "\"\"\"Tasks API\"\"\"\nfrom importlib import import_module\n\nfrom django.contrib import messages\nfrom django.http.response import Http404\nfrom django.utils.translation import gettext_lazy as _\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import CharField, ChoiceField, DateTimeField, ListField\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.events.monitored_tasks import TaskInfo, TaskResultStatus\n\nLOGGER = get_logger()\n\n\nclass TaskSerializer(PassiveSerializer):\n \"\"\"Serialize TaskInfo and TaskResult\"\"\"\n\n task_name = CharField()\n task_description = CharField()\n task_finish_timestamp = DateTimeField(source=\"finish_time\")\n\n status = ChoiceField(\n source=\"result.status.name\",\n choices=[(x.name, x.name) for x in TaskResultStatus],\n )\n messages = ListField(source=\"result.messages\")\n\n def to_representation(self, instance):\n \"\"\"When a new version of authentik adds fields to TaskInfo,\n the API will fail with an AttributeError, as the classes\n are pickled in cache. 
In that case, just delete the info\"\"\"\n try:\n return super().to_representation(instance)\n except AttributeError: # pragma: no cover\n if isinstance(self.instance, list):\n for inst in self.instance:\n inst.delete()\n else:\n self.instance.delete()\n return {}\n\n\nclass TaskViewSet(ViewSet):\n \"\"\"Read-only view set that returns all background tasks\"\"\"\n\n permission_classes = [IsAdminUser]\n serializer_class = TaskSerializer\n\n @extend_schema(\n responses={\n 200: TaskSerializer(many=False),\n 404: OpenApiResponse(description=\"Task not found\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n # pylint: disable=invalid-name\n def retrieve(self, request: Request, pk=None) -> Response:\n \"\"\"Get a single system task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n return Response(TaskSerializer(task, many=False).data)\n\n @extend_schema(responses={200: TaskSerializer(many=True)})\n def list(self, request: Request) -> Response:\n \"\"\"List system tasks\"\"\"\n tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)\n return Response(TaskSerializer(tasks, many=True).data)\n\n @extend_schema(\n request=OpenApiTypes.NONE,\n responses={\n 204: OpenApiResponse(description=\"Task retried successfully\"),\n 404: OpenApiResponse(description=\"Task not found\"),\n 500: OpenApiResponse(description=\"Failed to retry task\"),\n },\n parameters=[\n OpenApiParameter(\n \"id\",\n type=OpenApiTypes.STR,\n location=OpenApiParameter.PATH,\n required=True,\n ),\n ],\n )\n @action(detail=True, methods=[\"post\"])\n # pylint: disable=invalid-name\n def retry(self, request: Request, pk=None) -> Response:\n \"\"\"Retry task\"\"\"\n task = TaskInfo.by_name(pk)\n if not task:\n raise Http404\n try:\n task_module = import_module(task.task_call_module)\n task_func = getattr(task_module, task.task_call_func)\n LOGGER.debug(\"Running task\", task=task_func)\n task_func.delay(*task.task_call_args, **task.task_call_kwargs)\n messages.success(\n self.request,\n _(\"Successfully re-scheduled Task %(name)s!\" % {\"name\": task.task_name}),\n )\n return Response(status=204)\n except (ImportError, AttributeError): # pragma: no cover\n LOGGER.warning(\"Failed to run task, remove state\", task=task)\n # if we get an import error, the module path has probably changed\n task.delete()\n return Response(status=500)\n", "path": "authentik/admin/api/tasks.py"}]}
1,873
361
gh_patches_debug_22614
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-269
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E2502 is mistaken about IamInstanceProfile *cfn-lint version: 0.4.2* *Description of issue.* Linting a template returned: ``` E2502 Property IamInstanceProfile shouldn't be an ARN for Resources/BuildkiteSpotfleet/Properties/SpotFleetRequestConfigData/LaunchSpecifications/0/IamInstanceProfile/Arn/Fn::GetAtt ``` However that property can be an ARN according to https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications.html#cfn-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications-iaminstanceprofile It can be an `{"Arn": "profile_arn"}` structure. </issue> <code> [start of src/cfnlint/rules/resources/iam/InstanceProfile.py] 1 """ 2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 from cfnlint import CloudFormationLintRule 18 from cfnlint import RuleMatch 19 20 21 class InstanceProfile(CloudFormationLintRule): 22 """Check if IamInstanceProfile are used""" 23 id = 'E2502' 24 shortdesc = 'Check if IamInstanceProfile are using the name and not ARN' 25 description = 'See if there are any properties IamInstanceProfile' + \ 26 'are using name and not ARN' 27 source_url = 'https://github.com/awslabs/cfn-python-lint' 28 tags = ['properties'] 29 30 def match(self, cfn): 31 """Check CloudFormation IamInstanceProfile Parameters""" 32 33 matches = list() 34 35 # Build the list of keys 36 trees = cfn.search_deep_keys('Fn::GetAtt') 37 # Filter only resoureces 38 # Disable pylint for Pylint 2 39 # pylint: disable=W0110 40 trees = filter(lambda x: x[0] == 'Resources', trees) 41 for tree in trees: 42 if any(e == 'IamInstanceProfile' for e in tree): 43 obj = tree[-1] 44 objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type') 45 if objtype: 46 if objtype != 'AWS::IAM::InstanceProfile': 47 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % ( 48 '/'.join(map(str, tree[:-1]))) 49 matches.append(RuleMatch(tree[:-1], message)) 50 else: 51 if obj[1] == 'Arn': 52 message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % ( 53 '/'.join(map(str, tree[:-1]))) 54 matches.append(RuleMatch(tree[:-1], message)) 55 56 # Search Refs 57 trees = cfn.search_deep_keys('Ref') 58 # Filter only resoureces 59 trees = filter(lambda x: x[0] == 'Resources', trees) 60 for tree in trees: 61 if any(e == 'IamInstanceProfile' for e in tree): 62 obj = tree[-1] 63 objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type') 64 if objtype: 65 if objtype != 'AWS::IAM::InstanceProfile': 66 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % ( 67 '/'.join(map(str, tree[:-1]))) 68 matches.append(RuleMatch(tree[:-1], message)) 69 70 return matches 71 [end of src/cfnlint/rules/resources/iam/InstanceProfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py --- a/src/cfnlint/rules/resources/iam/InstanceProfile.py +++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py @@ -48,10 +48,16 @@ '/'.join(map(str, tree[:-1]))) matches.append(RuleMatch(tree[:-1], message)) else: - if obj[1] == 'Arn': - message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % ( - '/'.join(map(str, tree[:-1]))) - matches.append(RuleMatch(tree[:-1], message)) + if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']: + if obj[1] != 'Arn': + message = 'Property IamInstanceProfile should be an ARN for %s' % ( + '/'.join(map(str, tree[:-1]))) + matches.append(RuleMatch(tree[:-1], message)) + else: + if obj[1] == 'Arn': + message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % ( + '/'.join(map(str, tree[:-1]))) + matches.append(RuleMatch(tree[:-1], message)) # Search Refs trees = cfn.search_deep_keys('Ref')
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py\n+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n@@ -48,10 +48,16 @@\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n- if obj[1] == 'Arn':\n- message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n- '/'.join(map(str, tree[:-1])))\n- matches.append(RuleMatch(tree[:-1], message))\n+ if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n+ if obj[1] != 'Arn':\n+ message = 'Property IamInstanceProfile should be an ARN for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n+ else:\n+ if obj[1] == 'Arn':\n+ message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n \n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n", "issue": "E2502 is mistaken about IamInstanceProfile\n*cfn-lint version: 0.4.2*\r\n\r\n*Description of issue.*\r\n\r\nLinting a template returned:\r\n```\r\nE2502 Property IamInstanceProfile shouldn't be an ARN for Resources/BuildkiteSpotfleet/Properties/SpotFleetRequestConfigData/LaunchSpecifications/0/IamInstanceProfile/Arn/Fn::GetAtt\r\n```\r\n\r\nHowever that property can be an ARN according to https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications.html#cfn-ec2-spotfleet-spotfleetrequestconfigdata-launchspecifications-iaminstanceprofile\r\n\r\nIt can be an `{\"Arn\": \"profile_arn\"}` structure.\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = list()\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resoureces\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}]}
1,612
330
gh_patches_debug_57932
rasdani/github-patches
git_diff
scrapy__scrapy-3825
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issue with Twisted and Python 3.4 Twisted had a patch 3 days ago and it's causing test suite to fail for py34 environment. Twisted , according to their Readme, support Python 3.5+. This needs to be fixed if the builds need to pass </issue> <code> [start of setup.py] 1 from os.path import dirname, join 2 from pkg_resources import parse_version 3 from setuptools import setup, find_packages, __version__ as setuptools_version 4 5 6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: 7 version = f.read().decode('ascii').strip() 8 9 10 def has_environment_marker_platform_impl_support(): 11 """Code extracted from 'pytest/setup.py' 12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31 13 14 The first known release to support environment marker with range operators 15 it is 18.5, see: 16 https://setuptools.readthedocs.io/en/latest/history.html#id235 17 """ 18 return parse_version(setuptools_version) >= parse_version('18.5') 19 20 21 extras_require = {} 22 23 if has_environment_marker_platform_impl_support(): 24 extras_require[':platform_python_implementation == "PyPy"'] = [ 25 'PyPyDispatcher>=2.1.0', 26 ] 27 28 29 setup( 30 name='Scrapy', 31 version=version, 32 url='https://scrapy.org', 33 description='A high-level Web Crawling and Web Scraping framework', 34 long_description=open('README.rst').read(), 35 author='Scrapy developers', 36 maintainer='Pablo Hoffman', 37 maintainer_email='[email protected]', 38 license='BSD', 39 packages=find_packages(exclude=('tests', 'tests.*')), 40 include_package_data=True, 41 zip_safe=False, 42 entry_points={ 43 'console_scripts': ['scrapy = scrapy.cmdline:execute'] 44 }, 45 classifiers=[ 46 'Framework :: Scrapy', 47 'Development Status :: 5 - Production/Stable', 48 'Environment :: Console', 49 'Intended Audience :: Developers', 50 'License :: OSI Approved :: BSD License', 51 'Operating System :: OS Independent', 52 'Programming Language :: Python', 53 'Programming Language :: Python :: 2', 54 'Programming Language :: Python :: 2.7', 55 'Programming Language :: Python :: 3', 56 'Programming Language :: Python :: 3.4', 57 'Programming Language :: Python :: 3.5', 58 'Programming Language :: Python :: 3.6', 59 'Programming Language :: Python :: 3.7', 60 'Programming Language :: Python :: Implementation :: CPython', 61 'Programming Language :: Python :: Implementation :: PyPy', 62 'Topic :: Internet :: WWW/HTTP', 63 'Topic :: Software Development :: Libraries :: Application Frameworks', 64 'Topic :: Software Development :: Libraries :: Python Modules', 65 ], 66 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', 67 install_requires=[ 68 'Twisted>=13.1.0', 69 'w3lib>=1.17.0', 70 'queuelib', 71 'lxml', 72 'pyOpenSSL', 73 'cssselect>=0.9', 74 'six>=1.5.2', 75 'parsel>=1.5', 76 'PyDispatcher>=2.0.5', 77 'service_identity', 78 ], 79 extras_require=extras_require, 80 ) 81 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -65,7 +65,8 @@ ], python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', install_requires=[ - 'Twisted>=13.1.0', + 'Twisted>=13.1.0;python_version!="3.4"', + 'Twisted>=13.1.0,<=19.2.0;python_version=="3.4"', 'w3lib>=1.17.0', 'queuelib', 'lxml',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,8 @@\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n- 'Twisted>=13.1.0',\n+ 'Twisted>=13.1.0;python_version!=\"3.4\"',\n+ 'Twisted>=13.1.0,<=19.2.0;python_version==\"3.4\"',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n", "issue": "Issue with Twisted and Python 3.4 \nTwisted had a patch 3 days ago and it's causing test suite to fail for py34 environment. \r\nTwisted , according to their Readme, support Python 3.5+. This needs to be fixed if the builds need to pass\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.5',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
1426
154
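The golden diff above relies on PEP 508 environment markers to keep Twisted releases newer than 19.2.0 away from Python 3.4 while leaving other interpreters unconstrained. A minimal sketch of how such markers evaluate — illustrative only, not part of this record, and assuming the third-party `packaging` library is installed (the requirement strings are the ones from the diff):

```python
# Sketch only: evaluating the PEP 508 markers used in the golden diff.
# Assumes the third-party "packaging" library (pip install packaging).
from packaging.requirements import Requirement

requirements = [
    'Twisted>=13.1.0;python_version!="3.4"',
    'Twisted>=13.1.0,<=19.2.0;python_version=="3.4"',
]

for spec in requirements:
    req = Requirement(spec)
    # Requirement.marker holds the parsed ';python_version...' condition.
    applies = req.marker.evaluate({"python_version": "3.4"})
    print(f"{spec!r} applies on Python 3.4: {applies}")
# Only the capped requirement (<=19.2.0) is selected on Python 3.4.
```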
gh_patches_debug_8898
rasdani/github-patches
git_diff
speechbrain__speechbrain-71
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Learning rate printing Now it will only print 0.00. Maybe we should print it with scientific notation. </issue> <code> [start of speechbrain/utils/train_logger.py] 1 import logging 2 from speechbrain.utils.edit_distance import wer_summary 3 4 logger = logging.getLogger(__name__) 5 6 7 class TrainLogger: 8 """Abstract class defining an interface for training loggers.""" 9 10 def log_stats( 11 self, 12 stats_meta, 13 train_stats=None, 14 valid_stats=None, 15 test_stats=None, 16 verbose=False, 17 ): 18 """Log the stats for one epoch. 19 20 Arguments 21 --------- 22 stats_meta : dict of str:scalar pairs 23 Meta information about the stats (e.g. epoch, learning-rate, etc.) 24 train_stats : dict of str:list pairs 25 Each loss type is represented with a str : list pair including 26 all the values for the training pass. 27 valid_stats : dict of str:list pairs 28 Each loss type is represented with a str : list pair including 29 all the values for the validation pass. 30 test_stats : dict of str:list pairs 31 Each loss type is represented with a str : list pair including 32 all the values for the test pass. 33 verbose : bool 34 Whether to also put logging information to the standard logger. 35 """ 36 raise NotImplementedError 37 38 39 class FileTrainLogger(TrainLogger): 40 """Text logger of training information 41 42 Arguments 43 --------- 44 save_file : str 45 The file to use for logging train information. 46 summary_fns : dict of str:function pairs 47 Each summary function should take a list produced as output 48 from a training/validation pass and summarize it to a single scalar. 49 """ 50 51 def __init__(self, save_file, summary_fns): 52 self.save_file = save_file 53 self.summary_fns = summary_fns 54 55 def _item_to_string(self, key, value, dataset=None): 56 """Convert one item to string, handling floats""" 57 if isinstance(value, float): 58 value = f"{value:.2f}" 59 if dataset is not None: 60 key = f"{dataset} {key}" 61 return f"{key}: {value}" 62 63 def _stats_to_string(self, stats, dataset=None): 64 """Convert all stats to a single string summary""" 65 return ", ".join( 66 [self._item_to_string(k, v, dataset) for k, v in stats.items()] 67 ) 68 69 def log_stats( 70 self, 71 stats_meta, 72 train_stats=None, 73 valid_stats=None, 74 test_stats=None, 75 verbose=True, 76 ): 77 """See TrainLogger.log_stats()""" 78 string_summary = self._stats_to_string(stats_meta) 79 for dataset, stats in [ 80 ("train", train_stats), 81 ("valid", valid_stats), 82 ("test", test_stats), 83 ]: 84 if stats is None: 85 continue 86 summary = {} 87 for stat, value_list in stats.items(): 88 summary[stat] = self.summary_fns[stat](value_list) 89 string_summary += " - " + self._stats_to_string(summary, dataset) 90 91 with open(self.save_file, "a") as fout: 92 print(string_summary, file=fout) 93 if verbose: 94 logger.info(string_summary) 95 96 97 class TensorboardLogger(TrainLogger): 98 """Logs training information in the format required by Tensorboard. 99 100 Arguments 101 --------- 102 save_dir : str 103 A directory for storing all the relevant logs 104 105 Raises 106 ------ 107 ImportError if Tensorboard is not installed. 
108 """ 109 110 def __init__(self, save_dir): 111 self.save_dir = save_dir 112 113 # Raises ImportError if TensorBoard is not installed 114 from torch.utils.tensorboard import SummaryWriter 115 116 self.writer = SummaryWriter(self.save_dir) 117 self.global_step = {"train": {}, "valid": {}, "meta": 0} 118 119 def log_stats( 120 self, 121 stats_meta, 122 train_stats=None, 123 valid_stats=None, 124 test_stats=None, 125 verbose=False, 126 ): 127 """See TrainLogger.log_stats()""" 128 self.global_step["meta"] += 1 129 for name, value in stats_meta.items(): 130 self.writer.add_scalar(name, value, self.global_step["meta"]) 131 132 for dataset, stats in [ 133 ("train", train_stats), 134 ("valid", valid_stats), 135 ("test", test_stats), 136 ]: 137 if stats is None: 138 continue 139 for stat, value_list in stats.items(): 140 if stat not in self.global_step[dataset]: 141 self.global_step[dataset][stat] = 0 142 tag = f"{stat}/{dataset}" 143 for value in value_list: 144 new_global_step = self.global_step[dataset][stat] + 1 145 self.writer.add_scalar(tag, value, new_global_step) 146 self.global_step[dataset][stat] = new_global_step 147 148 149 def summarize_average(stat_list): 150 return float(sum(stat_list) / len(stat_list)) 151 152 153 def summarize_error_rate(stat_list): 154 summary = wer_summary(stat_list) 155 return summary["WER"] 156 [end of speechbrain/utils/train_logger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py --- a/speechbrain/utils/train_logger.py +++ b/speechbrain/utils/train_logger.py @@ -54,8 +54,10 @@ def _item_to_string(self, key, value, dataset=None): """Convert one item to string, handling floats""" - if isinstance(value, float): + if isinstance(value, float) and 0.01 < value < 100.0: value = f"{value:.2f}" + elif isinstance(value, float): + value = f"{value:.2e}" if dataset is not None: key = f"{dataset} {key}" return f"{key}: {value}"
{"golden_diff": "diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py\n--- a/speechbrain/utils/train_logger.py\n+++ b/speechbrain/utils/train_logger.py\n@@ -54,8 +54,10 @@\n \n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n- if isinstance(value, float):\n+ if isinstance(value, float) and 0.01 < value < 100.0:\n value = f\"{value:.2f}\"\n+ elif isinstance(value, float):\n+ value = f\"{value:.2e}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n", "issue": "Learning rate printing\nNow it will only print 0.00. Maybe we should print it with scientific notation.\n", "before_files": [{"content": "import logging\nfrom speechbrain.utils.edit_distance import wer_summary\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrainLogger:\n \"\"\"Abstract class defining an interface for training loggers.\"\"\"\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"Log the stats for one epoch.\n\n Arguments\n ---------\n stats_meta : dict of str:scalar pairs\n Meta information about the stats (e.g. epoch, learning-rate, etc.)\n train_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the training pass.\n valid_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the validation pass.\n test_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the test pass.\n verbose : bool\n Whether to also put logging information to the standard logger.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileTrainLogger(TrainLogger):\n \"\"\"Text logger of training information\n\n Arguments\n ---------\n save_file : str\n The file to use for logging train information.\n summary_fns : dict of str:function pairs\n Each summary function should take a list produced as output\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n\n def __init__(self, save_file, summary_fns):\n self.save_file = save_file\n self.summary_fns = summary_fns\n\n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n if isinstance(value, float):\n value = f\"{value:.2f}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n\n def _stats_to_string(self, stats, dataset=None):\n \"\"\"Convert all stats to a single string summary\"\"\"\n return \", \".join(\n [self._item_to_string(k, v, dataset) for k, v in stats.items()]\n )\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=True,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n string_summary = self._stats_to_string(stats_meta)\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n summary = {}\n for stat, value_list in stats.items():\n summary[stat] = self.summary_fns[stat](value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n\n with open(self.save_file, \"a\") as fout:\n print(string_summary, file=fout)\n if verbose:\n logger.info(string_summary)\n\n\nclass TensorboardLogger(TrainLogger):\n \"\"\"Logs training information in the format required by Tensorboard.\n\n Arguments\n ---------\n save_dir : str\n A directory 
for storing all the relevant logs\n\n Raises\n ------\n ImportError if Tensorboard is not installed.\n \"\"\"\n\n def __init__(self, save_dir):\n self.save_dir = save_dir\n\n # Raises ImportError if TensorBoard is not installed\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(self.save_dir)\n self.global_step = {\"train\": {}, \"valid\": {}, \"meta\": 0}\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n self.global_step[\"meta\"] += 1\n for name, value in stats_meta.items():\n self.writer.add_scalar(name, value, self.global_step[\"meta\"])\n\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n for stat, value_list in stats.items():\n if stat not in self.global_step[dataset]:\n self.global_step[dataset][stat] = 0\n tag = f\"{stat}/{dataset}\"\n for value in value_list:\n new_global_step = self.global_step[dataset][stat] + 1\n self.writer.add_scalar(tag, value, new_global_step)\n self.global_step[dataset][stat] = new_global_step\n\n\ndef summarize_average(stat_list):\n return float(sum(stat_list) / len(stat_list))\n\n\ndef summarize_error_rate(stat_list):\n summary = wer_summary(stat_list)\n return summary[\"WER\"]\n", "path": "speechbrain/utils/train_logger.py"}]}
1965
168
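The two-branch format in the diff above keeps human-scale numbers readable while switching very small values such as learning rates to scientific notation. The rule, pulled out of `FileTrainLogger` as a standalone sketch (the function name is illustrative, not from the codebase):

```python
# Sketch only: the formatting rule introduced by the golden diff.
def format_float(value):
    if isinstance(value, float) and 0.01 < value < 100.0:
        return f"{value:.2f}"   # human-scale values: fixed point
    elif isinstance(value, float):
        return f"{value:.2e}"   # tiny or huge values: scientific notation
    return str(value)

print(format_float(3.14159))   # 3.14
print(format_float(0.0003))    # 3.00e-04  (would have printed 0.00 before)
print(format_float(12345.6))   # 1.23e+04
```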
gh_patches_debug_5285
rasdani/github-patches
git_diff
freedomofpress__securedrop-7140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release SecureDrop 2.8.0 This is a tracking issue for the release of SecureDrop 2.8.0 Tentatively scheduled as follows: **Pre-release announcement:** 2023-03-05 **Release date:** 2024-03-12 **Release manager:** @zenmonkeykstop **Deputy release manager:** @legoktm **Localization manager:** @cfm **Communications manager:** @eloquence _SecureDrop maintainers and testers:_ As you QA 2.8.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them "QA: Release". Test debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey). # [QA Matrix for 2.8.0](https://docs.google.com/spreadsheets/d/1hcSrgbid03so0tQz3zfwvMaWJ8x7OOZsCfEz1I_PjAE/edit#gid=96348658) # [Test Plan for 2.8.0](https://github.com/freedomofpress/securedrop/wiki/2.8.0-Test-Plan) # [Tails-only test plan for 2.8.0-rc2](https://github.com/freedomofpress/securedrop/issues/7121#issuecomment-1988954749) (complete if you've already tested 2.8.0-rc1, there are no server changes in rc2) # Prepare release candidate (2.8.0~rc1) - [ ] Link to latest version of Tails, including release candidates, to test against during QA - [ ] Tails 5 - [ ] Tails 6 - [x] Prepare 2.8.0~rc1 release changelog - [x] Branch off release/2.8.0 from develop - [x] Prepare 2.8.0 - [ ] Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server - [ ] Commit build log. # Prepare release candidate (2.8.0~rc2) - [ ] Link to latest version of Tails, including release candidates, to test against during QA - [x] Tails 5 - [x] Tails 6 - [x] Prepare 2.8.0~rc2 release changelog - [x] Branch off release/2.8.0 from develop - [x] Prepare 2.8.0-rc2 - [ ] ~Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server~ skipped, as changes are Tails-only. - [ ] ~Commit build log.~ After each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket. 
# Final release - [ ] ~Ensure builder in release branch is updated and/or update builder image~ (no longer in use) - [x] Push signed tag - [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo) - [x] Build final Debian packages(and preserve build log) - [x] Commit package build log to https://github.com/freedomofpress/build-logs - [x] Pre-Flight: Test that install and upgrade from 2.7.0 to 2.8.0 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs) - [ ] Flip apt QA server to prod status (merge to `main` in the LFS repo) - [ ] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo - [ ] Prepare release messaging # Post release - [ ] Create GitHub release object - [ ] Once release object is created, update versions in `securedrop-docs` and Wagtail - [ ] Verify new docs show up on https://docs.securedrop.org - [ ] Publish announcements - [ ] Merge changelog back to `develop` - [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap </issue> <code> [start of securedrop/version.py] 1 __version__ = "2.8.0~rc1" 2 [end of securedrop/version.py] [start of securedrop/setup.py] 1 import setuptools 2 3 long_description = "The SecureDrop whistleblower platform." 4 5 setuptools.setup( 6 name="securedrop-app-code", 7 version="2.8.0~rc1", 8 author="Freedom of the Press Foundation", 9 author_email="[email protected]", 10 description="SecureDrop Server", 11 long_description=long_description, 12 long_description_content_type="text/markdown", 13 license="AGPLv3+", 14 python_requires=">=3.8", 15 url="https://github.com/freedomofpress/securedrop", 16 classifiers=[ 17 "Development Status :: 5 - Stable", 18 "Programming Language :: Python :: 3", 19 "Topic :: Software Development :: Libraries :: Python Modules", 20 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", 21 "Intended Audience :: Developers", 22 "Operating System :: OS Independent", 23 ], 24 ) 25 [end of securedrop/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/setup.py b/securedrop/setup.py --- a/securedrop/setup.py +++ b/securedrop/setup.py @@ -4,7 +4,7 @@ setuptools.setup( name="securedrop-app-code", - version="2.8.0~rc1", + version="2.9.0~rc1", author="Freedom of the Press Foundation", author_email="[email protected]", description="SecureDrop Server", diff --git a/securedrop/version.py b/securedrop/version.py --- a/securedrop/version.py +++ b/securedrop/version.py @@ -1 +1 @@ -__version__ = "2.8.0~rc1" +__version__ = "2.9.0~rc1"
{"golden_diff": "diff --git a/securedrop/setup.py b/securedrop/setup.py\n--- a/securedrop/setup.py\n+++ b/securedrop/setup.py\n@@ -4,7 +4,7 @@\n \n setuptools.setup(\n name=\"securedrop-app-code\",\n- version=\"2.8.0~rc1\",\n+ version=\"2.9.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = \"2.8.0~rc1\"\n+__version__ = \"2.9.0~rc1\"\n", "issue": "Release SecureDrop 2.8.0\nThis is a tracking issue for the release of SecureDrop 2.8.0\r\n\r\nTentatively scheduled as follows:\r\n\r\n**Pre-release announcement:** 2023-03-05\r\n**Release date:** 2024-03-12\r\n\r\n**Release manager:** @zenmonkeykstop \r\n**Deputy release manager:** @legoktm \r\n**Localization manager:** @cfm\r\n**Communications manager:** @eloquence\r\n\r\n_SecureDrop maintainers and testers:_ As you QA 2.8.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them \"QA: Release\".\r\n\r\nTest debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey).\r\n\r\n# [QA Matrix for 2.8.0](https://docs.google.com/spreadsheets/d/1hcSrgbid03so0tQz3zfwvMaWJ8x7OOZsCfEz1I_PjAE/edit#gid=96348658)\r\n# [Test Plan for 2.8.0](https://github.com/freedomofpress/securedrop/wiki/2.8.0-Test-Plan)\r\n# [Tails-only test plan for 2.8.0-rc2](https://github.com/freedomofpress/securedrop/issues/7121#issuecomment-1988954749)\r\n(complete if you've already tested 2.8.0-rc1, there are no server changes in rc2)\r\n\r\n# Prepare release candidate (2.8.0~rc1)\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n - [ ] Tails 5 \r\n - [ ] Tails 6 \r\n- [x] Prepare 2.8.0~rc1 release changelog\r\n- [x] Branch off release/2.8.0 from develop\r\n- [x] Prepare 2.8.0\r\n- [ ] Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server\r\n- [ ] Commit build log.\r\n\r\n# Prepare release candidate (2.8.0~rc2)\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n - [x] Tails 5 \r\n - [x] Tails 6 \r\n- [x] Prepare 2.8.0~rc2 release changelog\r\n- [x] Branch off release/2.8.0 from develop\r\n- [x] Prepare 2.8.0-rc2\r\n- [ ] ~Build debs, preserving build log, and put up `2.8.0~rc1` on test apt server~ skipped, as changes are Tails-only.\r\n- [ ] ~Commit build log.~\r\n\r\n\r\nAfter each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket.\r\n\r\n# Final release\r\n- [ ] ~Ensure builder in release branch is updated and/or update builder image~ (no longer in use)\r\n- [x] Push signed tag \r\n- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)\r\n- [x] Build final Debian packages(and preserve build log)\r\n- [x] Commit package build log to https://github.com/freedomofpress/build-logs\r\n- [x] Pre-Flight: Test that install and upgrade from 2.7.0 to 2.8.0 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs)\r\n- [ ] Flip apt QA server to prod status (merge to `main` in the LFS repo)\r\n- [ ] 
Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo\r\n- [ ] Prepare release messaging\r\n\r\n# Post release\r\n- [ ] Create GitHub release object \r\n- [ ] Once release object is created, update versions in `securedrop-docs` and Wagtail\r\n- [ ] Verify new docs show up on https://docs.securedrop.org\r\n- [ ] Publish announcements\r\n- [ ] Merge changelog back to `develop`\r\n- [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap\n", "before_files": [{"content": "__version__ = \"2.8.0~rc1\"\n", "path": "securedrop/version.py"}, {"content": "import setuptools\n\nlong_description = \"The SecureDrop whistleblower platform.\"\n\nsetuptools.setup(\n name=\"securedrop-app-code\",\n version=\"2.8.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n python_requires=\">=3.8\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=[\n \"Development Status :: 5 - Stable\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "path": "securedrop/setup.py"}]}
1867
176
gh_patches_debug_29242
rasdani/github-patches
git_diff
larq__larq-34
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tf.sign(0) = 0 </issue> <code> [start of xquant/quantizers.py] 1 import tensorflow as tf 2 from xquant import utils 3 4 5 @utils.register_keras_custom_object 6 @tf.custom_gradient 7 def ste_sign(x): 8 r""" 9 Sign binarization function. 10 \\[q(x) = \mathrm{Sign}(x)\\] 11 12 The gradient is estimated using the Straight-Through Estimator. 13 \\[\frac{\partial q(x)}{\partial x} = x\\] 14 15 # Arguments 16 x: Input tensor. 17 18 # Returns 19 Binarized tensor. 20 21 # References 22 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and 23 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830) 24 """ 25 26 def grad(dy): 27 return dy 28 29 return tf.sign(x), grad 30 31 32 @utils.register_keras_custom_object 33 @tf.custom_gradient 34 def approx_sign(x): 35 r""" 36 Sign binarization function. 37 \\[q(x) = \mathrm{Sign}(x)\\] 38 39 The gradient is estimated using the ApproxSign method. 40 \\[\frac{\partial q(x)}{\partial x} = (2 - 2 \left|x\right|))\\] 41 42 # Arguments 43 x: Input tensor. 44 45 # Returns 46 Binarized tensor. 47 48 # References 49 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved 50 Representational Capability and Advanced 51 Training Algorithm](http://arxiv.org/abs/1808.00278) 52 """ 53 54 def grad(dy): 55 return (1 - tf.abs(x)) * 2 * dy 56 57 return tf.sign(x), grad 58 59 60 def serialize(initializer): 61 return tf.keras.utils.serialize_keras_object(initializer) 62 63 64 def deserialize(name, custom_objects=None): 65 return tf.keras.utils.deserialize_keras_object( 66 name, 67 module_objects=globals(), 68 custom_objects=custom_objects, 69 printable_module_name="quantization function", 70 ) 71 72 73 def get(identifier): 74 if identifier is None: 75 return None 76 if isinstance(identifier, str): 77 return deserialize(str(identifier)) 78 if callable(identifier): 79 return identifier 80 raise ValueError( 81 "Could not interpret quantization function identifier:", identifier 82 ) 83 [end of xquant/quantizers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xquant/quantizers.py b/xquant/quantizers.py --- a/xquant/quantizers.py +++ b/xquant/quantizers.py @@ -2,12 +2,22 @@ from xquant import utils +def sign(x): + """A sign function that will never be zero""" + return tf.sign(tf.sign(x) + 1e-10) + + @utils.register_keras_custom_object @tf.custom_gradient def ste_sign(x): r""" Sign binarization function. - \\[q(x) = \mathrm{Sign}(x)\\] + \\[ + q(x) = \begin{cases} + -1 & x < 0 \\\ + 1 & x \geq 0 + \end{cases} + \\] The gradient is estimated using the Straight-Through Estimator. \\[\frac{\partial q(x)}{\partial x} = x\\] @@ -26,7 +36,7 @@ def grad(dy): return dy - return tf.sign(x), grad + return sign(x), grad @utils.register_keras_custom_object @@ -34,7 +44,12 @@ def approx_sign(x): r""" Sign binarization function. - \\[q(x) = \mathrm{Sign}(x)\\] + \\[ + q(x) = \begin{cases} + -1 & x < 0 \\\ + 1 & x \geq 0 + \end{cases} + \\] The gradient is estimated using the ApproxSign method. \\[\frac{\partial q(x)}{\partial x} = (2 - 2 \left|x\right|))\\] @@ -54,7 +69,7 @@ def grad(dy): return (1 - tf.abs(x)) * 2 * dy - return tf.sign(x), grad + return sign(x), grad def serialize(initializer):
{"golden_diff": "diff --git a/xquant/quantizers.py b/xquant/quantizers.py\n--- a/xquant/quantizers.py\n+++ b/xquant/quantizers.py\n@@ -2,12 +2,22 @@\n from xquant import utils\n \n \n+def sign(x):\n+ \"\"\"A sign function that will never be zero\"\"\"\n+ return tf.sign(tf.sign(x) + 1e-10)\n+\n+\n @utils.register_keras_custom_object\n @tf.custom_gradient\n def ste_sign(x):\n r\"\"\"\n Sign binarization function.\n- \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n+ \\\\[\n+ q(x) = \\begin{cases}\n+ -1 & x < 0 \\\\\\\n+ 1 & x \\geq 0\n+ \\end{cases}\n+ \\\\]\n \n The gradient is estimated using the Straight-Through Estimator.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = x\\\\]\n@@ -26,7 +36,7 @@\n def grad(dy):\n return dy\n \n- return tf.sign(x), grad\n+ return sign(x), grad\n \n \n @utils.register_keras_custom_object\n@@ -34,7 +44,12 @@\n def approx_sign(x):\n r\"\"\"\n Sign binarization function.\n- \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n+ \\\\[\n+ q(x) = \\begin{cases}\n+ -1 & x < 0 \\\\\\\n+ 1 & x \\geq 0\n+ \\end{cases}\n+ \\\\]\n \n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = (2 - 2 \\left|x\\right|))\\\\]\n@@ -54,7 +69,7 @@\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n \n- return tf.sign(x), grad\n+ return sign(x), grad\n \n \n def serialize(initializer):\n", "issue": "tf.sign(0) = 0\n\n", "before_files": [{"content": "import tensorflow as tf\nfrom xquant import utils\n\n\[email protected]_keras_custom_object\[email protected]_gradient\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n\n The gradient is estimated using the Straight-Through Estimator.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = x\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n def grad(dy):\n return dy\n\n return tf.sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_gradient\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[q(x) = \\mathrm{Sign}(x)\\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = (2 - 2 \\left|x\\right|))\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return tf.sign(x), grad\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n \"Could not interpret quantization function identifier:\", identifier\n )\n", "path": "xquant/quantizers.py"}]}
1207
455
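The golden diff above sidesteps `tf.sign(0) == 0` by nudging the inner sign with a small positive epsilon before taking the outer sign. The same arithmetic, sketched with a plain-Python stand-in for `tf.sign` so it runs without TensorFlow (illustrative only, not part of this record):

```python
# Sketch only: the zero-avoiding trick from the golden diff, with a plain
# Python stand-in for tf.sign (valid for real scalars).
def tf_like_sign(x):
    return float((x > 0) - (x < 0))   # sign(0) == 0, like tf.sign

def binary_sign(x):
    # sign(sign(x) + 1e-10): the inner sign maps x to {-1.0, 0.0, 1.0};
    # the epsilon pushes 0.0 to +1 without disturbing -1 or +1.
    return tf_like_sign(tf_like_sign(x) + 1e-10)

for x in (-2.5, 0.0, 0.7):
    print(x, tf_like_sign(x), binary_sign(x))
# 0.0 now binarizes to +1.0, so quantized weights never collapse to zero.
```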
gh_patches_debug_17349
rasdani/github-patches
git_diff
conan-io__conan-center-index-19060
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [request] fast-cdr/1.1.0 ### Package Name/Version fast-cdr/1.1.0 ### Changelog https://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0 ### Context about the new update The Conan Center Bot detects the updatable recipe in #3470. Open branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0) </issue> <code> [start of recipes/fast-cdr/all/conanfile.py] 1 from conan import ConanFile 2 from conan.errors import ConanInvalidConfiguration 3 from conan.tools.build import check_min_cppstd 4 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout 5 from conan.tools.files import collect_libs, copy, get, rm, rmdir, save 6 from conan.tools.microsoft import is_msvc, is_msvc_static_runtime 7 import os 8 import textwrap 9 10 required_conan_version = ">=1.54.0" 11 12 13 class FastCDRConan(ConanFile): 14 name = "fast-cdr" 15 license = "Apache-2.0" 16 homepage = "https://github.com/eProsima/Fast-CDR" 17 url = "https://github.com/conan-io/conan-center-index" 18 description = "eProsima FastCDR library for serialization" 19 topics = ("dds", "middleware", "serialization") 20 21 package_type = "library" 22 settings = "os", "arch", "compiler", "build_type" 23 options = { 24 "shared": [True, False], 25 "fPIC": [True, False], 26 } 27 default_options = { 28 "shared": False, 29 "fPIC": True, 30 } 31 32 def config_options(self): 33 if self.settings.os == "Windows": 34 del self.options.fPIC 35 36 def configure(self): 37 if self.options.shared: 38 self.options.rm_safe("fPIC") 39 40 def layout(self): 41 cmake_layout(self, src_folder="src") 42 43 def validate(self): 44 if self.settings.compiler.get_safe("cppstd"): 45 check_min_cppstd(self, 11) 46 if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self): 47 # This combination leads to an fast-cdr error when linking 48 # linking dynamic '*.dll' and static MT runtime 49 # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37 50 # (2021-05-31) 51 raise ConanInvalidConfiguration("Mixing a dll eprosima library with a static runtime is a bad idea") 52 53 def source(self): 54 get(self, **self.conan_data["sources"][self.version], strip_root=True) 55 56 def generate(self): 57 tc = CMakeToolchain(self) 58 tc.variables["BUILD_STATIC"] = not self.options.shared 59 tc.generate() 60 61 def build(self): 62 cmake = CMake(self) 63 cmake.configure() 64 cmake.build() 65 66 def package(self): 67 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) 68 cmake = CMake(self) 69 cmake.install() 70 rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) 71 rmdir(self, os.path.join(self.package_folder, "share")) 72 rm(self, "*.pdb", os.path.join(self.package_folder, "lib")) 73 rm(self, "*.pdb", os.path.join(self.package_folder, "bin")) 74 75 # TODO: to remove in conan v2 once cmake_find_package_* generators removed 76 self._create_cmake_module_alias_targets( 77 os.path.join(self.package_folder, self._module_file_rel_path), 78 {"fastcdr": "fastcdr::fastcdr"} 79 ) 80 81 def _create_cmake_module_alias_targets(self, module_file, targets): 82 content = "" 83 for alias, aliased in targets.items(): 84 content += textwrap.dedent(f"""\ 85 if(TARGET {aliased} AND NOT TARGET {alias}) 86 add_library({alias} INTERFACE IMPORTED) 87 set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased}) 88 endif() 89 """) 
90 save(self, module_file, content) 91 92 @property 93 def _module_file_rel_path(self): 94 return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake") 95 96 def package_info(self): 97 self.cpp_info.set_property("cmake_file_name", "fastcdr") 98 self.cpp_info.set_property("cmake_target_name", "fastcdr") 99 self.cpp_info.libs = collect_libs(self) 100 if self.settings.os == "Windows" and self.options.shared: 101 self.cpp_info.defines.append("FASTCDR_DYN_LINK") 102 103 # TODO: to remove in conan v2 once cmake_find_package_* generators removed 104 self.cpp_info.names["cmake_find_package"] = "fastcdr" 105 self.cpp_info.names["cmake_find_package_multi"] = "fastcdr" 106 self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path] 107 self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path] 108 [end of recipes/fast-cdr/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py --- a/recipes/fast-cdr/all/conanfile.py +++ b/recipes/fast-cdr/all/conanfile.py @@ -4,6 +4,7 @@ from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout from conan.tools.files import collect_libs, copy, get, rm, rmdir, save from conan.tools.microsoft import is_msvc, is_msvc_static_runtime +from conan.tools.scm import Version import os import textwrap @@ -40,6 +41,10 @@ def layout(self): cmake_layout(self, src_folder="src") + def build_requirements(self): + if Version(self.version) >= "1.1.0": + self.tool_requires("cmake/[>=3.16.3 <4]") + def validate(self): if self.settings.compiler.get_safe("cppstd"): check_min_cppstd(self, 11)
{"golden_diff": "diff --git a/recipes/fast-cdr/all/conanfile.py b/recipes/fast-cdr/all/conanfile.py\n--- a/recipes/fast-cdr/all/conanfile.py\n+++ b/recipes/fast-cdr/all/conanfile.py\n@@ -4,6 +4,7 @@\n from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n from conan.tools.files import collect_libs, copy, get, rm, rmdir, save\n from conan.tools.microsoft import is_msvc, is_msvc_static_runtime\n+from conan.tools.scm import Version\n import os\n import textwrap\n \n@@ -40,6 +41,10 @@\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n+ def build_requirements(self):\n+ if Version(self.version) >= \"1.1.0\":\n+ self.tool_requires(\"cmake/[>=3.16.3 <4]\")\n+\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n", "issue": "[request] fast-cdr/1.1.0\n### Package Name/Version\n\nfast-cdr/1.1.0\n\n### Changelog\n\nhttps://github.com/eProsima/Fast-CDR/releases/tag/v1.1.0\n\n### Context about the new update\n\nThe Conan Center Bot detects the updatable recipe in #3470.\r\nOpen branch [qchateau/conan-center-index/ccb-fast-cdr-1.1.0](https://github.com/qchateau/conan-center-index/tree/ccb-fast-cdr-1.1.0)\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import collect_libs, copy, get, rm, rmdir, save\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.54.0\"\n\n\nclass FastCDRConan(ConanFile):\n name = \"fast-cdr\"\n license = \"Apache-2.0\"\n homepage = \"https://github.com/eProsima/Fast-CDR\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"eProsima FastCDR library for serialization\"\n topics = (\"dds\", \"middleware\", \"serialization\")\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):\n # This combination leads to an fast-cdr error when linking\n # linking dynamic '*.dll' and static MT runtime\n # see https://github.com/eProsima/Fast-CDR/blob/v1.0.21/include/fastcdr/eProsima_auto_link.h#L37\n # (2021-05-31)\n raise ConanInvalidConfiguration(\"Mixing a dll eprosima library with a static runtime is a bad idea\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_STATIC\"] = not self.options.shared\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.pdb\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastcdr\": \"fastcdr::fastcdr\"}\n )\n\n def _create_cmake_module_alias_targets(self, module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(f\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\")\n save(self, module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", f\"conan-official-{self.name}-targets.cmake\")\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"fastcdr\")\n self.cpp_info.set_property(\"cmake_target_name\", \"fastcdr\")\n self.cpp_info.libs = collect_libs(self)\n if self.settings.os == \"Windows\" and self.options.shared:\n self.cpp_info.defines.append(\"FASTCDR_DYN_LINK\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"fastcdr\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"fastcdr\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n", "path": "recipes/fast-cdr/all/conanfile.py"}]}
1913
233
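The new `build_requirements` hook in the diff above gates the CMake tool requirement on the recipe version. The comparison it relies on, sketched with Conan's `Version` class exactly as the diff uses it (assumes a Conan installation; the version strings below are examples):

```python
# Sketch only: the version gate from the golden diff, assuming conan is
# importable. Versions 1.1.0 and newer pull in the modern CMake.
from conan.tools.scm import Version

for recipe_version in ("1.0.21", "1.1.0", "1.2.1"):
    needs_cmake = Version(recipe_version) >= "1.1.0"
    print(recipe_version, "-> tool_requires cmake/[>=3.16.3 <4]:", needs_cmake)
```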
gh_patches_debug_1457
rasdani/github-patches
git_diff
liqd__a4-meinberlin-539
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> use list markup for lists of elements Part of the BITV-Test: "1.3.1b HTML-Strukturelemente für Listen" ---- - [x] list of blueprints - [x] list of projects unsure: - [ ] list of questions in poll contents - [ ] list of choices in poll contents </issue> <code> [start of apps/contrib/templatetags/contrib_tags.py] 1 from django import template 2 from django.template.loader import render_to_string 3 4 register = template.Library() 5 6 7 @register.assignment_tag 8 def include_template_string(template, **kwargs): 9 rendered_template = render_to_string(template, kwargs) 10 return str(rendered_template) 11 12 13 @register.assignment_tag 14 def combined_url_parameter(request_query_dict, **kwargs): 15 combined_query_dict = request_query_dict.copy() 16 for key in kwargs: 17 combined_query_dict.setlist(key, [kwargs[key]]) 18 encoded_parameter = '?' + combined_query_dict.urlencode() 19 return encoded_parameter 20 21 22 @register.assignment_tag 23 def filter_has_perm(perm, user, objects): 24 """Filter a list of objects based on user permissions.""" 25 if not hasattr(user, 'has_perm'): 26 # If the swapped user model does not support permissions, all objects 27 # will be returned. This is taken from rules.templatetags.has_perm. 28 return objects 29 else: 30 return (obj for obj in objects if user.has_perm(perm, obj)) 31 [end of apps/contrib/templatetags/contrib_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py --- a/apps/contrib/templatetags/contrib_tags.py +++ b/apps/contrib/templatetags/contrib_tags.py @@ -27,4 +27,4 @@ # will be returned. This is taken from rules.templatetags.has_perm. return objects else: - return (obj for obj in objects if user.has_perm(perm, obj)) + return [obj for obj in objects if user.has_perm(perm, obj)]
{"golden_diff": "diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py\n--- a/apps/contrib/templatetags/contrib_tags.py\n+++ b/apps/contrib/templatetags/contrib_tags.py\n@@ -27,4 +27,4 @@\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n- return (obj for obj in objects if user.has_perm(perm, obj))\n+ return [obj for obj in objects if user.has_perm(perm, obj)]\n", "issue": "use list markup for lists of elements\nPart of the BITV-Test: \"1.3.1b HTML-Strukturelemente f\u00fcr Listen\"\r\n----\r\n- [x] list of blueprints\r\n- [x] list of projects\r\n\r\nunsure:\r\n- [ ] list of questions in poll contents\r\n- [ ] list of choices in poll contents \n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return (obj for obj in objects if user.has_perm(perm, obj))\n", "path": "apps/contrib/templatetags/contrib_tags.py"}]}
893
135
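The seemingly tiny change above, from `(...)` to `[...]`, matters because a Django template may consume the same context value more than once — for example an emptiness check followed by the `{% for %}` loop that renders the `<ul>` list markup the issue asks for. A generator survives only one pass; a sketch of the difference (illustrative only, not part of this record):

```python
# Sketch only: why the golden diff returns a list instead of a generator.
objects = ["project-a", "project-b"]

gen = (obj for obj in objects)      # pre-diff behaviour
print(list(gen))                    # ['project-a', 'project-b']
print(list(gen))                    # []  <- exhausted on the second pass

lst = [obj for obj in objects]      # post-diff behaviour
print(list(lst))                    # ['project-a', 'project-b']
print(list(lst))                    # ['project-a', 'project-b']
```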
gh_patches_debug_17267
rasdani/github-patches
git_diff
pulp__pulpcore-239
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix bug where Last-Modified header was being updated on duplicate package uploads Fixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads. Closes #5149 </issue> <code> [start of setup.py] 1 from setuptools import find_packages, setup 2 3 with open('README.md') as f: 4 long_description = f.read() 5 6 requirements = [ 7 'coreapi', 8 'Django~=2.2', # LTS version, switch only if we have a compelling reason to 9 'django-filter', 10 'djangorestframework', 11 'djangorestframework-queryfields', 12 'drf-nested-routers', 13 'drf-yasg', 14 'gunicorn', 15 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412 16 'PyYAML', 17 'rq~=1.0', 18 'redis<3.2.0', 19 'setuptools', 20 'dynaconf~=2.0', 21 'whitenoise', 22 ] 23 24 setup( 25 name='pulpcore', 26 version='3.0.0rc5.dev', 27 description='Pulp Django Application and Related Modules', 28 long_description=long_description, 29 long_description_content_type="text/markdown", 30 license='GPLv2+', 31 packages=find_packages(exclude=['test']), 32 author='Pulp Team', 33 author_email='[email protected]', 34 url='http://www.pulpproject.org', 35 python_requires='>=3.6', 36 install_requires=requirements, 37 extras_require={ 38 'postgres': ['psycopg2-binary'], 39 'mysql': ['mysqlclient'] 40 }, 41 include_package_data=True, 42 classifiers=( 43 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 44 'Operating System :: POSIX :: Linux', 45 'Development Status :: 4 - Beta', 46 'Framework :: Django', 47 'Programming Language :: Python', 48 'Programming Language :: Python :: 3', 49 'Programming Language :: Python :: 3.6', 50 'Programming Language :: Python :: 3.7', 51 ), 52 scripts=['bin/pulp-content'], 53 ) 54 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,21 +4,21 @@ long_description = f.read() requirements = [ - 'coreapi', - 'Django~=2.2', # LTS version, switch only if we have a compelling reason to - 'django-filter', - 'djangorestframework', - 'djangorestframework-queryfields', - 'drf-nested-routers', - 'drf-yasg', - 'gunicorn', + 'coreapi~=2.3.3', + 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to + 'django-filter~=2.2.0', + 'djangorestframework~=3.10.2', + 'djangorestframework-queryfields~=1.0.0', + 'drf-nested-routers~=0.91.0', + 'drf-yasg~=1.16.1', + 'gunicorn~=19.9.0', 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412 - 'PyYAML', - 'rq~=1.0', - 'redis<3.2.0', - 'setuptools', - 'dynaconf~=2.0', - 'whitenoise', + 'PyYAML~=5.1.1', + 'rq~=1.1.0', + 'redis~=3.1.0', + 'setuptools~=41.0.1', + 'dynaconf~=2.0.3', + 'whitenoise~=4.1.3', ] setup(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,21 @@\n long_description = f.read()\n \n requirements = [\n- 'coreapi',\n- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n- 'django-filter',\n- 'djangorestframework',\n- 'djangorestframework-queryfields',\n- 'drf-nested-routers',\n- 'drf-yasg',\n- 'gunicorn',\n+ 'coreapi~=2.3.3',\n+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n+ 'django-filter~=2.2.0',\n+ 'djangorestframework~=3.10.2',\n+ 'djangorestframework-queryfields~=1.0.0',\n+ 'drf-nested-routers~=0.91.0',\n+ 'drf-yasg~=1.16.1',\n+ 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n- 'PyYAML',\n- 'rq~=1.0',\n- 'redis<3.2.0',\n- 'setuptools',\n- 'dynaconf~=2.0',\n- 'whitenoise',\n+ 'PyYAML~=5.1.1',\n+ 'rq~=1.1.0',\n+ 'redis~=3.1.0',\n+ 'setuptools~=41.0.1',\n+ 'dynaconf~=2.0.3',\n+ 'whitenoise~=4.1.3',\n ]\n \n setup(\n", "issue": "Fix bug where Last-Modified header was being updated on duplicate package uploads\nFixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.\r\n\r\nCloses #5149\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi',\n 'Django~=2.2', # LTS version, switch only if we have a compelling reason to\n 'django-filter',\n 'djangorestframework',\n 'djangorestframework-queryfields',\n 'drf-nested-routers',\n 'drf-yasg',\n 'gunicorn',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML',\n 'rq~=1.0',\n 'redis<3.2.0',\n 'setuptools',\n 'dynaconf~=2.0',\n 'whitenoise',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc5.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}]}
1114
426
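The golden diff above replaces open-ended requirements with `~=` "compatible release" pins. What a pin such as `Django~=2.2.3` accepts, sketched with the third-party `packaging` library (illustrative only, not part of this record):

```python
# Sketch only: semantics of the "~=" pins added in the golden diff.
# Assumes the third-party "packaging" library (pip install packaging).
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=2.2.3")      # shorthand for >=2.2.3, ==2.2.*
for candidate in ("2.2.3", "2.2.28", "2.3.0", "3.0.0"):
    print(candidate, "satisfies ~=2.2.3:", candidate in spec)
# Patch releases pass; 2.3.0 and 3.0.0 are rejected.
```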
gh_patches_debug_25641
rasdani/github-patches
git_diff
sublimelsp__LSP-1573
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Rename…" code action is broken (v1.1.6) The _Rename_ code action doesn't work correctly. I wasn't able to use it in VSCode so I can't tell whether it comes from the language server or the Sublime extension. Here is a minimal example: ```elm module Test exposing (..) import Html exposing (Html) view : Html msg view = Html.text body body : String body = "..." ``` When renaming `body` for instance: * if the new name has the same length, it seems to work fine * if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated: ```elm view : Html msg view = Html.text bodyxyzxyz bodyxyzxyz : String bodyxyzxyz = "..." ``` * if the new name is shorter (`a` in this example), well… ```elm view : Html msg view = Html.text aaString a "..." ``` </issue> <code> [start of plugin/core/edit.py] 1 from .logging import debug 2 from .open import open_file 3 from .promise import Promise 4 from .typing import List, Dict, Any, Iterable, Optional, Tuple 5 from .url import uri_to_filename 6 from functools import partial 7 import operator 8 import sublime 9 10 11 # tuple of start, end, newText, version 12 TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]] 13 14 15 def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]: 16 changes = {}  # type: Dict[str, List[TextEdit]] 17 raw_changes = workspace_edit.get('changes') 18 if isinstance(raw_changes, dict): 19 for uri, file_changes in raw_changes.items(): 20 changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes) 21 document_changes = workspace_edit.get('documentChanges') 22 if isinstance(document_changes, list): 23 for document_change in document_changes: 24 if 'kind' in document_change: 25 debug('Ignoring unsupported "resourceOperations" edit type') 26 continue 27 uri = document_change.get('textDocument').get('uri') 28 version = document_change.get('textDocument').get('version') 29 text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits')) 30 changes.setdefault(uri_to_filename(uri), []).extend(text_edit) 31 return changes 32 33 34 def parse_range(range: Dict[str, int]) -> Tuple[int, int]: 35 return range['line'], range['character'] 36 37 38 def parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit: 39 return ( 40 parse_range(text_edit['range']['start']), 41 parse_range(text_edit['range']['end']), 42 # Strip away carriage returns -- SublimeText takes care of that. 43 text_edit.get('newText', '').replace("\r", ""), 44 version 45 ) 46 47 48 def sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]: 49 # The spec reads: 50 # > However, it is possible that multiple edits have the same start position: multiple 51 # > inserts, or any number of inserts followed by a single remove or replace edit. If 52 # > multiple inserts have the same position, the order in the array defines the order in 53 # > which the inserted strings appear in the resulting text. 54 # So we sort by start position. But if multiple text edits start at the same position, 55 # we use the index in the array as the key. 56 57 return list(sorted(changes, key=operator.itemgetter(0))) 58 59 60 def apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise: 61 """Apply workspace edits. 
This function must be called from the main thread!""" 62 return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()]) 63 64 65 def _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None: 66 if view and view.is_valid(): 67 # Text commands run blocking. After this call has returned the changes are applied. 68 view.run_command("lsp_apply_document_edit", {"changes": edits}) 69 [end of plugin/core/edit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/core/edit.py b/plugin/core/edit.py --- a/plugin/core/edit.py +++ b/plugin/core/edit.py @@ -14,10 +14,6 @@ def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]: changes = {} # type: Dict[str, List[TextEdit]] - raw_changes = workspace_edit.get('changes') - if isinstance(raw_changes, dict): - for uri, file_changes in raw_changes.items(): - changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes) document_changes = workspace_edit.get('documentChanges') if isinstance(document_changes, list): for document_change in document_changes: @@ -28,6 +24,11 @@ version = document_change.get('textDocument').get('version') text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits')) changes.setdefault(uri_to_filename(uri), []).extend(text_edit) + else: + raw_changes = workspace_edit.get('changes') + if isinstance(raw_changes, dict): + for uri, file_changes in raw_changes.items(): + changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes) return changes
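A note on the diff above: it makes `documentChanges` take precedence and reads the legacy `changes` map only as a fallback. Previously both shapes were parsed, so a server sending both had every edit applied twice, which would explain the duplicated and mangled text in the report. A minimal sketch of the corrected precedence, with simplified stand-ins for the plugin's `parse_text_edit` and `uri_to_filename` helpers:

```python
# Minimal sketch of the fallback ordering after the fix; the tuple layout and
# helper names are simplified stand-ins for the plugin's own parsers.
def parse_workspace_edit(workspace_edit):
    changes = {}
    document_changes = workspace_edit.get("documentChanges")
    if isinstance(document_changes, list):
        for dc in document_changes:
            if "kind" in dc:
                continue  # resource operations (create/rename/delete) unsupported
            uri = dc["textDocument"]["uri"]
            version = dc["textDocument"].get("version")
            edits = [(e["range"], e["newText"], version) for e in dc["edits"]]
            changes.setdefault(uri, []).extend(edits)
    else:
        # Legacy shape, consulted only when documentChanges is absent, so a
        # server that sends both never has the same edit applied twice.
        for uri, file_changes in (workspace_edit.get("changes") or {}).items():
            changes[uri] = [(e["range"], e["newText"], None) for e in file_changes]
    return changes
```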
{"golden_diff": "diff --git a/plugin/core/edit.py b/plugin/core/edit.py\n--- a/plugin/core/edit.py\n+++ b/plugin/core/edit.py\n@@ -14,10 +14,6 @@\n \n def parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n- raw_changes = workspace_edit.get('changes')\n- if isinstance(raw_changes, dict):\n- for uri, file_changes in raw_changes.items():\n- changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n@@ -28,6 +24,11 @@\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n+ else:\n+ raw_changes = workspace_edit.get('changes')\n+ if isinstance(raw_changes, dict):\n+ for uri, file_changes in raw_changes.items():\n+ changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n return changes\n", "issue": "\"Rename\u2026\" code action is broken\n(v1.1.6) The _Rename_ code action doesn\u2019t work correctly. I wasn\u2019t able to use it in VSCode so I can\u2019t tell whether it comes from the language server of the Sublime extension.\r\n\r\nHere is a minimal example:\r\n\r\n```elm\r\nmodule Test exposing (..)\r\n\r\nimport Html exposing (Html)\r\n\r\n\r\nview : Html msg\r\nview =\r\n Html.text body\r\n\r\n\r\nbody : String\r\nbody =\r\n \"...\"\r\n```\r\n\r\nWhen renaming `body` for instance:\r\n\r\n* if the new name has the same length, it seems to work fine\r\n* if the new name is longer (`bodyxyz` in the example below), the last few characters are duplicated:\r\n```elm\r\n\r\nview : Html msg\r\nview =\r\n Html.text bodyxyzxyz\r\n\r\n\r\nbodyxyzxyz : String\r\nbodyxyzxyz =\r\n \"...\"\r\n```\r\n* if the new name is shorter (`a` in this example), well\u2026\r\n```elm\r\nview : Html msg\r\nview =\r\n Html.text aaString\r\na \"...\"\r\n```\n", "before_files": [{"content": "from .logging import debug\nfrom .open import open_file\nfrom .promise import Promise\nfrom .typing import List, Dict, Any, Iterable, Optional, Tuple\nfrom .url import uri_to_filename\nfrom functools import partial\nimport operator\nimport sublime\n\n\n# tuple of start, end, newText, version\nTextEdit = Tuple[Tuple[int, int], Tuple[int, int], str, Optional[int]]\n\n\ndef parse_workspace_edit(workspace_edit: Dict[str, Any]) -> Dict[str, List[TextEdit]]:\n changes = {} # type: Dict[str, List[TextEdit]]\n raw_changes = workspace_edit.get('changes')\n if isinstance(raw_changes, dict):\n for uri, file_changes in raw_changes.items():\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n document_changes = workspace_edit.get('documentChanges')\n if isinstance(document_changes, list):\n for document_change in document_changes:\n if 'kind' in document_change:\n debug('Ignoring unsupported \"resourceOperations\" edit type')\n continue\n uri = document_change.get('textDocument').get('uri')\n version = document_change.get('textDocument').get('version')\n text_edit = list(parse_text_edit(change, version) for change in document_change.get('edits'))\n changes.setdefault(uri_to_filename(uri), []).extend(text_edit)\n return changes\n\n\ndef parse_range(range: Dict[str, int]) -> Tuple[int, int]:\n return range['line'], range['character']\n\n\ndef 
parse_text_edit(text_edit: Dict[str, Any], version: int = None) -> TextEdit:\n return (\n parse_range(text_edit['range']['start']),\n parse_range(text_edit['range']['end']),\n # Strip away carriage returns -- SublimeText takes care of that.\n text_edit.get('newText', '').replace(\"\\r\", \"\"),\n version\n )\n\n\ndef sort_by_application_order(changes: Iterable[TextEdit]) -> List[TextEdit]:\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or replace edit. If\n # > multiple inserts have the same position, the order in the array defines the order in\n # > which the inserted strings appear in the resulting text.\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n\n return list(sorted(changes, key=operator.itemgetter(0)))\n\n\ndef apply_workspace_edit(window: sublime.Window, changes: Dict[str, List[TextEdit]]) -> Promise:\n \"\"\"Apply workspace edits. This function must be called from the main thread!\"\"\"\n return Promise.all([open_file(window, fn).then(partial(_apply_edits, edits)) for fn, edits in changes.items()])\n\n\ndef _apply_edits(edits: List[TextEdit], view: Optional[sublime.View]) -> None:\n if view and view.is_valid():\n # Text commands run blocking. After this call has returned the changes are applied.\n view.run_command(\"lsp_apply_document_edit\", {\"changes\": edits})\n", "path": "plugin/core/edit.py"}]}
1,559
283
gh_patches_debug_14435
rasdani/github-patches
git_diff
fossasia__open-event-server-5247
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistent data on Admin Statistics end points **I'm submitting a ...** (check one with "x") - [x] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server **Current behavior:** <!-- Describe how the bug manifests. --> The admin statistics end points return various stats about events, users etc. Currently they are completely out of sync with the database. For instance, the `admin/statisitics/events` returns ![image](https://user-images.githubusercontent.com/17252805/39888957-1a5daf72-54b4-11e8-934c-1d77a9ab66b7.png) Where as the actual event count when generically querying for events is 92: ![image](https://user-images.githubusercontent.com/17252805/39889011-44af3d9a-54b4-11e8-8fb8-c45b0cef175d.png) **Expected behavior:** <!-- Describe what the behavior would be without the bug. --> The counts should be consistent. </issue> <code> [start of app/api/schema/admin_statistics_schema/events.py] 1 from marshmallow_jsonapi.flask import Schema 2 from marshmallow_jsonapi import fields 3 from app.models.event import Event 4 from app.api.helpers.db import get_count 5 from app.api.helpers.utilities import dasherize 6 from datetime import datetime 7 import pytz 8 9 10 class AdminStatisticsEventSchema(Schema): 11 """ 12 Api schema 13 """ 14 class Meta: 15 """ 16 Meta class 17 """ 18 type_ = 'admin-statistics-event' 19 self_view = 'v1.admin_statistics_event_detail' 20 inflect = dasherize 21 22 id = fields.String() 23 draft = fields.Method("events_draft_count") 24 published = fields.Method("events_published_count") 25 past = fields.Method("events_past_count") 26 27 def events_draft_count(self, obj): 28 return get_count(Event.query.filter_by(state='draft')) 29 30 def events_published_count(self, obj): 31 return get_count(Event.query.filter_by(state='published')) 32 33 def events_past_count(self, obj): 34 return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc))) 35 [end of app/api/schema/admin_statistics_schema/events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py --- a/app/api/schema/admin_statistics_schema/events.py +++ b/app/api/schema/admin_statistics_schema/events.py @@ -25,10 +25,12 @@ past = fields.Method("events_past_count") def events_draft_count(self, obj): - return get_count(Event.query.filter_by(state='draft')) + events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc)) + return get_count(events.filter_by(state='draft')) def events_published_count(self, obj): - return get_count(Event.query.filter_by(state='published')) + events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc)) + return get_count(events.filter_by(state='published')) def events_past_count(self, obj): return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
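The change above restricts the draft and published counts to events that have not yet ended, which appears to be what brings the admin statistics back in line with the generic listing. A sketch of the resulting query shapes; `Event` and `get_count` are the imports already used by the schema, while `upcoming` is a hypothetical helper introduced here for the shared filter:

```python
from datetime import datetime

import pytz

from app.api.helpers.db import get_count   # as imported in the schema module
from app.models.event import Event

def upcoming(query):
    """Hypothetical helper: restrict a query to events that have not ended."""
    return query.filter(Event.ends_at > datetime.now(pytz.utc))

draft = get_count(upcoming(Event.query).filter_by(state="draft"))
published = get_count(upcoming(Event.query).filter_by(state="published"))
# The past count keeps the complementary filter, unchanged by the patch:
past = get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))
```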
{"golden_diff": "diff --git a/app/api/schema/admin_statistics_schema/events.py b/app/api/schema/admin_statistics_schema/events.py\n--- a/app/api/schema/admin_statistics_schema/events.py\n+++ b/app/api/schema/admin_statistics_schema/events.py\n@@ -25,10 +25,12 @@\n past = fields.Method(\"events_past_count\")\n \n def events_draft_count(self, obj):\n- return get_count(Event.query.filter_by(state='draft'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='draft'))\n \n def events_published_count(self, obj):\n- return get_count(Event.query.filter_by(state='published'))\n+ events = Event.query.filter(Event.ends_at > datetime.now(pytz.utc))\n+ return get_count(events.filter_by(state='published'))\n \n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "issue": "Inconsistent data on Admin Statistics end points\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\nThe admin statistics end points return various stats about events, users etc.\r\nCurrently they are completely out of sync with the database.\r\nFor instance, the `admin/statisitics/events` returns \r\n![image](https://user-images.githubusercontent.com/17252805/39888957-1a5daf72-54b4-11e8-934c-1d77a9ab66b7.png)\r\n\r\nWhere as the actual event count when generically querying for events is 92:\r\n![image](https://user-images.githubusercontent.com/17252805/39889011-44af3d9a-54b4-11e8-8fb8-c45b0cef175d.png)\r\n\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nThe counts should be consistent.\n", "before_files": [{"content": "from marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom app.models.event import Event\nfrom app.api.helpers.db import get_count\nfrom app.api.helpers.utilities import dasherize\nfrom datetime import datetime\nimport pytz\n\n\nclass AdminStatisticsEventSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'admin-statistics-event'\n self_view = 'v1.admin_statistics_event_detail'\n inflect = dasherize\n\n id = fields.String()\n draft = fields.Method(\"events_draft_count\")\n published = fields.Method(\"events_published_count\")\n past = fields.Method(\"events_past_count\")\n\n def events_draft_count(self, obj):\n return get_count(Event.query.filter_by(state='draft'))\n\n def events_published_count(self, obj):\n return get_count(Event.query.filter_by(state='published'))\n\n def events_past_count(self, obj):\n return get_count(Event.query.filter(Event.ends_at < datetime.now(pytz.utc)))\n", "path": "app/api/schema/admin_statistics_schema/events.py"}]}
1,113
203
gh_patches_debug_49498
rasdani/github-patches
git_diff
pex-tool__pex-1516
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.55 On the docket: + [x] Add official support for Python 3.10 (#1512) + [x] Always register global options. (#1511) + [x] Fix RTD generation by pinning docutils low. (#1509) </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.54" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.54" +__version__ = "2.1.55"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.54\"\n+__version__ = \"2.1.55\"\n", "issue": "Release 2.1.55\nOn the docket:\r\n+ [x] Add official support for Python 3.10 (#1512)\r\n+ [x] Always register global options. (#1511)\r\n+ [x] Fix RTD generation by pinning docutils low. (#1509)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.54\"\n", "path": "pex/version.py"}]}
653
97
gh_patches_debug_16916
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-452
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings # Brief Description of Fix Currently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function. In the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`: """ Clean column names. Takes all column names, converts them to lowercase, then replaces all spaces with underscores. <b>Does not mutate original dataframe.</b> """ Happy to add this line somewhere else in the docstring if inappropriate here. - [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html) - [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py) </issue> <code> [start of janitor/biology.py] 1 """ 2 Biology and bioinformatics-oriented data cleaning functions. 3 """ 4 5 import pandas as pd 6 import pandas_flavor as pf 7 8 from .utils import deprecated_alias, import_message 9 10 try: 11 from Bio import SeqIO 12 except ImportError: 13 import_message( 14 "biology", "biopython", "conda install -c conda-forge biopython" 15 ) 16 17 18 @pf.register_dataframe_method 19 @deprecated_alias(col_name="column_name") 20 def join_fasta( 21 df: pd.DataFrame, filename: str, id_col: str, column_name 22 ) -> pd.DataFrame: 23 """ 24 Convenience method to join in a FASTA file as a column. 25 26 This allows us to add the string sequence of a FASTA file as a new column 27 of data in the dataframe. 28 29 This function only attaches the string representation of the SeqRecord.Seq 30 object from Biopython. Does not attach the full SeqRecord. Alphabet is 31 also not stored, under the assumption that the data scientist has domain 32 knowledge of what kind of sequence is being read in (nucleotide vs. amino 33 acid.) 34 35 For more advanced functions, please use phylopandas. 36 37 :param df: A pandas DataFrame. 38 :param filename: Path to the FASTA file. 39 :param id_col: The column in the DataFrame that houses sequence IDs. 40 :param column_name: The name of the new column. 41 """ 42 seqrecords = { 43 x.id: x.seq.__str__() for x in SeqIO.parse(filename, "fasta") 44 } 45 seq_col = [seqrecords[i] for i in df[id_col]] 46 df[column_name] = seq_col 47 return df 48 [end of janitor/biology.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/janitor/biology.py b/janitor/biology.py --- a/janitor/biology.py +++ b/janitor/biology.py @@ -26,12 +26,14 @@ This allows us to add the string sequence of a FASTA file as a new column of data in the dataframe. - This function only attaches the string representation of the SeqRecord.Seq + This method only attaches the string representation of the SeqRecord.Seq object from Biopython. Does not attach the full SeqRecord. Alphabet is also not stored, under the assumption that the data scientist has domain knowledge of what kind of sequence is being read in (nucleotide vs. amino acid.) + This method mutates the original DataFrame. + For more advanced functions, please use phylopandas. :param df: A pandas DataFrame.
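Since the fix above is a single docstring line, a quick check of the behavior it documents may help: `join_fasta` writes the new column onto the frame it receives and returns that same object. The FASTA path and column names below are hypothetical, and the import assumes biopython is available:

```python
import pandas as pd

import janitor.biology  # noqa: F401 -- registers join_fasta on DataFrame

df = pd.DataFrame({"seq_id": ["recA", "recB"]})
result = df.join_fasta(
    "sequences.fasta",      # hypothetical file with records named recA/recB
    id_col="seq_id",
    column_name="sequence",
)

# The method mutates the original DataFrame and returns the same object:
assert result is df
assert "sequence" in df.columns
```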
{"golden_diff": "diff --git a/janitor/biology.py b/janitor/biology.py\n--- a/janitor/biology.py\n+++ b/janitor/biology.py\n@@ -26,12 +26,14 @@\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n \n- This function only attaches the string representation of the SeqRecord.Seq\n+ This method only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n \n+ This method mutates the original DataFrame.\n+\n For more advanced functions, please use phylopandas.\n \n :param df: A pandas DataFrame.\n", "issue": "[DOC] Clarify original-dataframe mutation behavior in pyjanitor function docstrings\n# Brief Description of Fix\r\nCurrently, some pyjanitor functions mutate the original dataframe and others return a copy. Solutions are heavily discussed in #79 but no conclusion has been reached. At the moment, it is unclear, without experimentation from the user, which behavior applies in each function. \r\n\r\nIn the interim, I propose to explicitly clarify this behavior in each function's docstring so the user has a clear idea regarding the function's mutating behavior. Below is a sample of what this could look like for `.clean_names()`:\r\n\r\n\"\"\"\r\nClean column names.\r\n Takes all column names, converts them to lowercase, then replaces all\r\n spaces with underscores. <b>Does not mutate original dataframe.</b>\r\n\"\"\"\r\n\r\nHappy to add this line somewhere else in the docstring if inappropriate here. \r\n\r\n- [Link to documentation page](https://pyjanitor.readthedocs.io/reference/index.html)\r\n- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/blob/dev/janitor/functions.py)\r\n\n", "before_files": [{"content": "\"\"\"\nBiology and bioinformatics-oriented data cleaning functions.\n\"\"\"\n\nimport pandas as pd\nimport pandas_flavor as pf\n\nfrom .utils import deprecated_alias, import_message\n\ntry:\n from Bio import SeqIO\nexcept ImportError:\n import_message(\n \"biology\", \"biopython\", \"conda install -c conda-forge biopython\"\n )\n\n\[email protected]_dataframe_method\n@deprecated_alias(col_name=\"column_name\")\ndef join_fasta(\n df: pd.DataFrame, filename: str, id_col: str, column_name\n) -> pd.DataFrame:\n \"\"\"\n Convenience method to join in a FASTA file as a column.\n\n This allows us to add the string sequence of a FASTA file as a new column\n of data in the dataframe.\n\n This function only attaches the string representation of the SeqRecord.Seq\n object from Biopython. Does not attach the full SeqRecord. Alphabet is\n also not stored, under the assumption that the data scientist has domain\n knowledge of what kind of sequence is being read in (nucleotide vs. amino\n acid.)\n\n For more advanced functions, please use phylopandas.\n\n :param df: A pandas DataFrame.\n :param filename: Path to the FASTA file.\n :param id_col: The column in the DataFrame that houses sequence IDs.\n :param column_name: The name of the new column.\n \"\"\"\n seqrecords = {\n x.id: x.seq.__str__() for x in SeqIO.parse(filename, \"fasta\")\n }\n seq_col = [seqrecords[i] for i in df[id_col]]\n df[column_name] = seq_col\n return df\n", "path": "janitor/biology.py"}]}
1,209
199
gh_patches_debug_15954
rasdani/github-patches
git_diff
Nitrate__Nitrate-352
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove django 1.10 support - Remove django 1.10 testenv from tox.ini - Update django version in setup.py. Minimum django version is `1.11`. </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 with open('VERSION.txt', 'r') as f: 9 pkg_version = f.read().strip() 10 11 12 def get_long_description(): 13 with open('README.rst', 'r') as f: 14 return f.read() 15 16 17 install_requires = [ 18 'PyMySQL == 0.7.11', 19 'beautifulsoup4 >= 4.1.1', 20 'celery == 4.1.0', 21 'django >= 1.10,<2.0', 22 'django-contrib-comments == 1.8.0', 23 'django-tinymce == 2.7.0', 24 'django-uuslug == 1.1.8', 25 'html2text', 26 'kobo == 0.7.0', 27 'odfpy >= 0.9.6', 28 'python-bugzilla', 29 'six', 30 'xmltodict', 31 ] 32 33 if sys.version_info.major < 3: 34 install_requires += [ 35 'enum34', 36 ] 37 38 extras_require = { 39 # Required for tcms.core.contrib.auth.backends.KerberosBackend 40 'krbauth': [ 41 'kerberos == 1.2.5' 42 ], 43 44 # Packages for building documentation 45 'docs': [ 46 'Sphinx >= 1.1.2', 47 'sphinx_rtd_theme', 48 ], 49 50 # Necessary packages for running tests 51 'tests': [ 52 'coverage', 53 'factory_boy', 54 'flake8', 55 'mock', 56 'pytest', 57 'pytest-cov', 58 'pytest-django', 59 ], 60 61 # Contain tools that assists the development 62 'devtools': [ 63 'django-debug-toolbar == 1.7', 64 'tox', 65 'django-extensions', 66 'pygraphviz', 67 'future-breakpoint', 68 ] 69 } 70 71 72 setup( 73 name='Nitrate', 74 version=pkg_version, 75 description='Test Case Management System', 76 long_description=get_long_description(), 77 author='Nitrate Team', 78 maintainer='Chenxiong Qi', 79 maintainer_email='[email protected]', 80 url='https://github.com/Nitrate/Nitrate/', 81 license='GPLv2+', 82 keywords='test case', 83 install_requires=install_requires, 84 extras_require=extras_require, 85 packages=find_packages(), 86 include_package_data=True, 87 classifiers=[ 88 'Framework :: Django', 89 'Framework :: Django :: 1.10', 90 'Framework :: Django :: 1.11', 91 'Intended Audience :: Developers', 92 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 93 'Programming Language :: Python :: 2', 94 'Programming Language :: Python :: 2.7', 95 'Programming Language :: Python :: 3', 96 'Programming Language :: Python :: 3.6', 97 'Topic :: Software Development :: Quality Assurance', 98 'Topic :: Software Development :: Testing', 99 ], 100 ) 101 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ 'PyMySQL == 0.7.11', 'beautifulsoup4 >= 4.1.1', 'celery == 4.1.0', - 'django >= 1.10,<2.0', + 'django >= 1.11,<2.0', 'django-contrib-comments == 1.8.0', 'django-tinymce == 2.7.0', 'django-uuslug == 1.1.8', @@ -86,7 +86,6 @@ include_package_data=True, classifiers=[ 'Framework :: Django', - 'Framework :: Django :: 1.10', 'Framework :: Django :: 1.11', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
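For reference on the specifier change above, a small check of what the new floor admits. This uses `packaging`, purely to illustrate the range; it is not a dependency the patch itself touches:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.11,<2.0")
print("1.10.8" in spec)   # False -- Django 1.10 is no longer accepted
print("1.11.20" in spec)  # True  -- 1.11 LTS remains the supported floor
print("2.0.0" in spec)    # False -- the 2.0 ceiling is unchanged
```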
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n- 'django >= 1.10,<2.0',\n+ 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -86,7 +86,6 @@\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n", "issue": "Remove django 1.10 support\n- Remove django 1.10 testenv from tox.ini\r\n- Update django version in setup.py. Minimum django version is `1.11`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django >= 1.10,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]}
1,429
223
gh_patches_debug_39573
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-674
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support Jinja 3.x Jinja 3.x was released last week and it has broken some functionality within the agent. The following tests are currently failing: ``` tests/integration/instruments/test_jinja2_py36plus.py::test_async_render tests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name tests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning tests/integration/instruments/test_urllib3.py::test_request_no_absolute_url ``` </issue> <code> [start of src/scout_apm/instruments/jinja2.py] 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import logging 5 import sys 6 7 import wrapt 8 9 from scout_apm.core.tracked_request import TrackedRequest 10 11 try: 12 from jinja2 import Environment 13 except ImportError: # pragma: no cover 14 Environment = None 15 16 try: 17 from jinja2 import Template 18 except ImportError: # pragma: no cover 19 Template = None 20 21 # The async_ module can only be shipped on Python 3.6+ 22 try: 23 from scout_apm.async_.instruments.jinja2 import wrapped_render_async 24 except ImportError: 25 wrapped_render_async = None 26 27 28 logger = logging.getLogger(__name__) 29 30 31 have_patched_environment_init = False 32 have_patched_template_render = False 33 have_patched_template_render_async = False 34 35 36 def ensure_installed(): 37 global have_patched_environment_init 38 global have_patched_template_render 39 40 logger.debug("Instrumenting Jinja2.") 41 42 if Template is None: 43 logger.debug("Couldn't import jinja2.Template - probably not installed.") 44 return 45 46 if not have_patched_environment_init: 47 try: 48 Environment.__init__ = wrapped_environment_init(Environment.__init__) 49 except Exception as exc: 50 logger.warning( 51 "Failed to instrument jinja2.Environment.__init__: %r", 52 exc, 53 exc_info=exc, 54 ) 55 else: 56 have_patched_environment_init = True 57 58 if not have_patched_template_render: 59 try: 60 Template.render = wrapped_render(Template.render) 61 except Exception as exc: 62 logger.warning( 63 "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc 64 ) 65 else: 66 have_patched_template_render = True 67 68 69 @wrapt.decorator 70 def wrapped_render(wrapped, instance, args, kwargs): 71 tracked_request = TrackedRequest.instance() 72 with tracked_request.span(operation="Template/Render") as span: 73 span.tag("name", instance.name) 74 return wrapped(*args, **kwargs) 75 76 77 @wrapt.decorator 78 def wrapped_environment_init(wrapped, instance, args, kwargs): 79 """ 80 Delayed wrapping of render_async(), since Template won't have this method 81 until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is 82 done conditionally in Environment.__init__: 83 https://github.com/pallets/jinja/issues/765 84 """ 85 global have_patched_template_render_async 86 result = wrapped(*args, **kwargs) 87 88 if ( 89 wrapped_render_async is not None 90 and not have_patched_template_render_async 91 and "jinja2.asyncsupport" in sys.modules 92 ): 93 try: 94 Template.render_async = wrapped_render_async(Template.render_async) 95 except Exception as exc: 96 logger.warning( 97 "Failed to instrument jinja2.Template.render_async: %r", 98 exc, 99 exc_info=exc, 100 ) 101 else: 102 have_patched_template_render_async = True 103 104 return result 105 [end of src/scout_apm/instruments/jinja2.py] </code> I need you to solve this issue by 
generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py --- a/src/scout_apm/instruments/jinja2.py +++ b/src/scout_apm/instruments/jinja2.py @@ -34,7 +34,6 @@ def ensure_installed(): - global have_patched_environment_init global have_patched_template_render logger.debug("Instrumenting Jinja2.") @@ -43,9 +42,31 @@ logger.debug("Couldn't import jinja2.Template - probably not installed.") return - if not have_patched_environment_init: + instrument_render_async() + + if not have_patched_template_render: + try: + Template.render = wrapped_render(Template.render) + except Exception as exc: + logger.warning( + "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc + ) + else: + have_patched_template_render = True + + +def instrument_render_async(): + global have_patched_environment_init + global have_patched_template_render_async + + if wrapped_render_async is None: + return + + if not have_patched_environment_init and not hasattr(Template, "render_async"): try: - Environment.__init__ = wrapped_environment_init(Environment.__init__) + Environment.__init__ = wrapped_environment_init_jinja_v2( + Environment.__init__ + ) except Exception as exc: logger.warning( "Failed to instrument jinja2.Environment.__init__: %r", @@ -54,16 +75,17 @@ ) else: have_patched_environment_init = True - - if not have_patched_template_render: + elif hasattr(Template, "render_async") and not have_patched_template_render_async: try: - Template.render = wrapped_render(Template.render) + Template.render_async = wrapped_render_async(Template.render_async) except Exception as exc: logger.warning( - "Failed to instrument jinja2.Template.render: %r", exc, exc_info=exc + "Failed to instrument jinja2.Template.render_async: %r", + exc, + exc_info=exc, ) else: - have_patched_template_render = True + have_patched_template_render_async = True @wrapt.decorator @@ -75,12 +97,14 @@ @wrapt.decorator -def wrapped_environment_init(wrapped, instance, args, kwargs): +def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs): """ Delayed wrapping of render_async(), since Template won't have this method until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is done conditionally in Environment.__init__: https://github.com/pallets/jinja/issues/765 + + This is no longer needed since Jinja2 v3.0.0 """ global have_patched_template_render_async result = wrapped(*args, **kwargs)
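Context for the diff above: Jinja 3.x defines `Template.render_async` at import time and drops the lazily imported `jinja2.asyncsupport` module, so the old `sys.modules` check never fired. The patch therefore branches on `hasattr(Template, "render_async")`, wrapping the coroutine directly where it already exists and keeping the delayed `Environment.__init__` hook only for Jinja 2.x. A reduced sketch of that split, assuming a `wrapped_render_async` decorator like the agent's and leaving out its logging:

```python
from jinja2 import Environment, Template

def instrument_render_async(wrapped_render_async):
    if hasattr(Template, "render_async"):
        # Jinja 3.x: the coroutine exists as soon as the module is imported.
        Template.render_async = wrapped_render_async(Template.render_async)
        return

    # Jinja 2.x: render_async only appears after Environment.__init__
    # conditionally imports jinja2.asyncsupport, so hook the constructor.
    original_init = Environment.__init__
    patched = {"done": False}

    def init_then_wrap(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        if not patched["done"] and hasattr(Template, "render_async"):
            Template.render_async = wrapped_render_async(Template.render_async)
            patched["done"] = True

    Environment.__init__ = init_then_wrap
```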
{"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -34,7 +34,6 @@\n \n \n def ensure_installed():\n- global have_patched_environment_init\n global have_patched_template_render\n \n logger.debug(\"Instrumenting Jinja2.\")\n@@ -43,9 +42,31 @@\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n \n- if not have_patched_environment_init:\n+ instrument_render_async()\n+\n+ if not have_patched_template_render:\n+ try:\n+ Template.render = wrapped_render(Template.render)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ )\n+ else:\n+ have_patched_template_render = True\n+\n+\n+def instrument_render_async():\n+ global have_patched_environment_init\n+ global have_patched_template_render_async\n+\n+ if wrapped_render_async is None:\n+ return\n+\n+ if not have_patched_environment_init and not hasattr(Template, \"render_async\"):\n try:\n- Environment.__init__ = wrapped_environment_init(Environment.__init__)\n+ Environment.__init__ = wrapped_environment_init_jinja_v2(\n+ Environment.__init__\n+ )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n@@ -54,16 +75,17 @@\n )\n else:\n have_patched_environment_init = True\n-\n- if not have_patched_template_render:\n+ elif hasattr(Template, \"render_async\") and not have_patched_template_render_async:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n- \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Failed to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_template_render_async = True\n \n \n @wrapt.decorator\n@@ -75,12 +97,14 @@\n \n \n @wrapt.decorator\n-def wrapped_environment_init(wrapped, instance, args, kwargs):\n+def wrapped_environment_init_jinja_v2(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n+\n+ This is no longer needed since Jinja2 v3.0.0\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n", "issue": "Support Jinja 3.x\nJinja 3.x was released last week and it has broken some functionality within the agent. 
The following tests are currently failing:\r\n\r\n```\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render\r\ntests/integration/instruments/test_jinja2_py36plus.py::test_async_render_name\r\ntests/integration/instruments/test_urllib3.py::test_request - pytest.PytestUnraisableExceptionWarning\r\ntests/integration/instruments/test_urllib3.py::test_request_no_absolute_url\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_environment_init\n global have_patched_template_render\n\n logger.debug(\"Instrumenting Jinja2.\")\n\n if Template is None:\n logger.debug(\"Couldn't import jinja2.Template - probably not installed.\")\n return\n\n if not have_patched_environment_init:\n try:\n Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=\"Template/Render\") as span:\n span.tag(\"name\", instance.name)\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrapped_environment_init(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}]}
1,535
693
gh_patches_debug_12015
rasdani/github-patches
git_diff
iterative__dvc-8505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `exp run`: data gets re-imported on every call # Bug Report ## Description When a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called. ### Reproduce 1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs 2. dvc stage add -n foo -d cats-dogs echo foo 3. dvc exp run ### Expected When using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same. ### Environment information **Output of `dvc doctor`:** ```console $ dvc doctor DVC version: 2.6.3 (pip) --------------------------------- Platform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit Supports: gdrive (pydrive2 = 1.9.1), http (requests = 2.26.0), https (requests = 2.26.0) Cache types: reflink, hardlink, symlink Cache directory: apfs on /dev/disk1s1s1 Caches: local Remotes: None Workspace directory: apfs on /dev/disk1s1s1 Repo: dvc, git ``` **Additional Information (if any):** ```console $ dvc repro -v 2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b' 'cats-dogs.dvc' didn't change, skipping Stage 'foo' didn't change, skipping Data and pipelines are up to date. ``` ```console $ dvc exp run -v 2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD' 2021-08-25 11:12:15,690 DEBUG: Stashing workspace 2021-08-25 11:12:15,700 DEBUG: No changes to stash 2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5 2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir Computing file/dir hashes (only done once) . . . ``` </issue> <code> [start of dvc/repo/commit.py] 1 from dvc import prompt 2 3 from . import locked 4 5 6 def _prepare_message(stage, changes): 7 changed_deps, changed_outs, changed_stage = changes 8 if changed_deps and changed_outs: 9 msg = "dependencies {deps} and outputs {outs} of {stage} changed." 10 elif changed_deps: 11 msg = "dependencies {deps} of {stage} changed." 12 elif changed_outs: 13 msg = "outputs {outs} of {stage} changed." 14 else: 15 msg = "{stage_changed}" 16 17 msg += " Are you sure you want to commit it?" 18 19 kw = { 20 "stage": stage, 21 "deps": changed_deps, 22 "outs": changed_outs, 23 "stage_changed": changed_stage, 24 } 25 return msg.format_map(kw) 26 27 28 def prompt_to_commit(stage, changes, force=False): 29 from dvc.stage.exceptions import StageCommitError 30 31 if not (force or prompt.confirm(_prepare_message(stage, changes))): 32 raise StageCommitError( 33 "unable to commit changed {}. 
Use `-f|--force` to " 34 "force.".format(stage) 35 ) 36 37 38 @locked 39 def commit( 40 self, 41 target, 42 with_deps=False, 43 recursive=False, 44 force=False, 45 allow_missing=False, 46 data_only=False, 47 ): 48 from dvc.dvcfile import Dvcfile 49 50 stages_info = [ 51 info 52 for info in self.stage.collect_granular( 53 target, with_deps=with_deps, recursive=recursive 54 ) 55 if not data_only or info.stage.is_data_source 56 ] 57 for stage_info in stages_info: 58 stage = stage_info.stage 59 changes = stage.changed_entries() 60 if any(changes): 61 prompt_to_commit(stage, changes, force=force) 62 stage.save(allow_missing=allow_missing) 63 stage.commit( 64 filter_info=stage_info.filter_info, allow_missing=allow_missing 65 ) 66 67 Dvcfile(self, stage.path).dump(stage, update_pipeline=False) 68 return [s.stage for s in stages_info] 69 [end of dvc/repo/commit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py --- a/dvc/repo/commit.py +++ b/dvc/repo/commit.py @@ -56,10 +56,13 @@ ] for stage_info in stages_info: stage = stage_info.stage - changes = stage.changed_entries() - if any(changes): - prompt_to_commit(stage, changes, force=force) + if force: stage.save(allow_missing=allow_missing) + else: + changes = stage.changed_entries() + if any(changes): + prompt_to_commit(stage, changes, force=force) + stage.save(allow_missing=allow_missing) stage.commit( filter_info=stage_info.filter_info, allow_missing=allow_missing )
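Reading the diff above: `force` now bypasses `stage.changed_entries()` entirely, and per the report that comparison is what kept re-hashing the imported data; `dvc exp run` commits with force, so imports are saved as-is. A condensed view of the new control flow, with `stage` and `prompt_to_commit` as in the module:

```python
# Reduced control flow of the patched loop in dvc/repo/commit.py.
def commit_stage(stage, force=False, allow_missing=False):
    if force:
        # Forced commits (the exp-run path) skip changed_entries(), the
        # comparison that previously re-hashed imported dependencies.
        stage.save(allow_missing=allow_missing)
    else:
        changes = stage.changed_entries()
        if any(changes):
            prompt_to_commit(stage, changes, force=force)  # raises on decline
            stage.save(allow_missing=allow_missing)
    stage.commit(allow_missing=allow_missing)
```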
{"golden_diff": "diff --git a/dvc/repo/commit.py b/dvc/repo/commit.py\n--- a/dvc/repo/commit.py\n+++ b/dvc/repo/commit.py\n@@ -56,10 +56,13 @@\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n- changes = stage.changed_entries()\n- if any(changes):\n- prompt_to_commit(stage, changes, force=force)\n+ if force:\n stage.save(allow_missing=allow_missing)\n+ else:\n+ changes = stage.changed_entries()\n+ if any(changes):\n+ prompt_to_commit(stage, changes, force=force)\n+ stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n", "issue": "`exp run`: data gets re-imported on every call\n# Bug Report\r\n\r\n## Description\r\n\r\nWhen a pipeline uses an imported data file (with `dvc import`), the data gets cloned(?) and hashed every time `dvc exp run` is called.\r\n\r\n### Reproduce\r\n\r\n1. dvc import [email protected]:iterative/dataset-registry.git use-cases/cats-dogs\r\n2. dvc stage add -n foo -d cats-dogs echo foo\r\n3. dvc exp run\r\n\r\n### Expected\r\n\r\nWhen using `dvc repro` the imported data doesn't get re-hashed. I would expect `dvc exp run` to behave the same.\r\n\r\n### Environment information\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n$ dvc doctor\r\nDVC version: 2.6.3 (pip)\r\n---------------------------------\r\nPlatform: Python 3.9.6 on macOS-10.16-x86_64-i386-64bit\r\nSupports:\r\n gdrive (pydrive2 = 1.9.1),\r\n http (requests = 2.26.0),\r\n https (requests = 2.26.0)\r\nCache types: reflink, hardlink, symlink\r\nCache directory: apfs on /dev/disk1s1s1\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: apfs on /dev/disk1s1s1\r\nRepo: dvc, git\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\n```console\r\n$ dvc repro -v\r\n2021-08-25 11:11:55,186 DEBUG: Computed stage: 'cats-dogs.dvc' md5: '5a135b297ee3c96465ce4b320f44fb8b'\r\n'cats-dogs.dvc' didn't change, skipping\r\nStage 'foo' didn't change, skipping\r\nData and pipelines are up to date.\r\n```\r\n\r\n```console\r\n$ dvc exp run -v\r\n2021-08-25 11:12:15,672 DEBUG: Detaching HEAD at 'HEAD' \r\n2021-08-25 11:12:15,690 DEBUG: Stashing workspace\r\n2021-08-25 11:12:15,700 DEBUG: No changes to stash\r\n2021-08-25 11:12:15,749 DEBUG: Creating external repo [email protected]:iterative/dataset-registry.git@ca140591a21c6d75a7057d1e2eb3f51d3115c5f5\r\n2021-08-25 11:12:15,749 DEBUG: erepo: git clone '[email protected]:iterative/dataset-registry.git' to a temporary dir\r\nComputing file/dir hashes (only done once) \r\n. . . \r\n```\r\n\r\n\n", "before_files": [{"content": "from dvc import prompt\n\nfrom . import locked\n\n\ndef _prepare_message(stage, changes):\n changed_deps, changed_outs, changed_stage = changes\n if changed_deps and changed_outs:\n msg = \"dependencies {deps} and outputs {outs} of {stage} changed.\"\n elif changed_deps:\n msg = \"dependencies {deps} of {stage} changed.\"\n elif changed_outs:\n msg = \"outputs {outs} of {stage} changed.\"\n else:\n msg = \"{stage_changed}\"\n\n msg += \" Are you sure you want to commit it?\"\n\n kw = {\n \"stage\": stage,\n \"deps\": changed_deps,\n \"outs\": changed_outs,\n \"stage_changed\": changed_stage,\n }\n return msg.format_map(kw)\n\n\ndef prompt_to_commit(stage, changes, force=False):\n from dvc.stage.exceptions import StageCommitError\n\n if not (force or prompt.confirm(_prepare_message(stage, changes))):\n raise StageCommitError(\n \"unable to commit changed {}. 
Use `-f|--force` to \"\n \"force.\".format(stage)\n )\n\n\n@locked\ndef commit(\n self,\n target,\n with_deps=False,\n recursive=False,\n force=False,\n allow_missing=False,\n data_only=False,\n):\n from dvc.dvcfile import Dvcfile\n\n stages_info = [\n info\n for info in self.stage.collect_granular(\n target, with_deps=with_deps, recursive=recursive\n )\n if not data_only or info.stage.is_data_source\n ]\n for stage_info in stages_info:\n stage = stage_info.stage\n changes = stage.changed_entries()\n if any(changes):\n prompt_to_commit(stage, changes, force=force)\n stage.save(allow_missing=allow_missing)\n stage.commit(\n filter_info=stage_info.filter_info, allow_missing=allow_missing\n )\n\n Dvcfile(self, stage.path).dump(stage, update_pipeline=False)\n return [s.stage for s in stages_info]\n", "path": "dvc/repo/commit.py"}]}
1,797
177
gh_patches_debug_35163
rasdani/github-patches
git_diff
StackStorm__st2-4174
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incomplete help for: st2 action-alias match ``` $ st2 action-alias match --help usage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y] [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]] command st2 action-alias match: error: too few arguments ``` </issue> <code> [start of st2client/st2client/commands/action_alias.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import absolute_import 17 18 from st2client.models import core 19 from st2client.models.action_alias import ActionAlias 20 from st2client.models.action_alias import ActionAliasMatch 21 from st2client.commands import resource 22 from st2client.formatters import table 23 24 25 __all__ = [ 26 'ActionAliasBranch', 27 'ActionAliasMatchCommand', 28 'ActionAliasExecuteCommand' 29 ] 30 31 32 class ActionAliasBranch(resource.ResourceBranch): 33 def __init__(self, description, app, subparsers, parent_parser=None): 34 super(ActionAliasBranch, self).__init__( 35 ActionAlias, description, app, subparsers, 36 parent_parser=parent_parser, read_only=False, 37 commands={ 38 'list': ActionAliasListCommand, 39 'get': ActionAliasGetCommand 40 }) 41 42 self.commands['match'] = ActionAliasMatchCommand( 43 self.resource, self.app, self.subparsers, 44 add_help=False) 45 self.commands['execute'] = ActionAliasExecuteCommand( 46 self.resource, self.app, self.subparsers, 47 add_help=False) 48 49 50 class ActionAliasListCommand(resource.ContentPackResourceListCommand): 51 display_attributes = ['ref', 'pack', 'description', 'enabled'] 52 53 54 class ActionAliasGetCommand(resource.ContentPackResourceGetCommand): 55 display_attributes = ['all'] 56 attribute_display_order = ['id', 'ref', 'pack', 'name', 'description', 57 'enabled', 'action_ref', 'formats'] 58 59 60 class ActionAliasMatchCommand(resource.ResourceCommand): 61 display_attributes = ['name', 'description'] 62 63 def __init__(self, resource, *args, **kwargs): 64 super(ActionAliasMatchCommand, self).__init__( 65 resource, 'match', 66 'Get the list of %s that match the command text.' % 67 resource.get_plural_display_name().lower(), 68 *args, **kwargs) 69 70 self.parser.add_argument('match_text', 71 metavar='command', 72 help=help) 73 self.parser.add_argument('-h', '--help', 74 action='store_true', dest='help', 75 help='Print usage for the given action.') 76 self.parser.add_argument('-a', '--attr', nargs='+', 77 default=self.display_attributes, 78 help=('List of attributes to include in the ' 79 'output. 
"all" will return all ' 80 'attributes.')) 81 self.parser.add_argument('-w', '--width', nargs='+', type=int, 82 default=None, 83 help=('Set the width of columns in output.')) 84 85 @resource.add_auth_token_to_kwargs_from_cli 86 def run(self, args, **kwargs): 87 alias_match = ActionAliasMatch() 88 alias_match.command = args.match_text 89 90 match, _ = self.manager.match(alias_match, **kwargs) 91 return [match] 92 93 def run_and_print(self, args, **kwargs): 94 instances = self.run(args, **kwargs) 95 self.print_output(instances, table.MultiColumnTable, 96 attributes=args.attr, widths=args.width, 97 json=args.json, yaml=args.yaml) 98 99 100 class ActionAliasExecuteCommand(resource.ResourceCommand): 101 display_attributes = ['name'] 102 103 def __init__(self, resource, *args, **kwargs): 104 super(ActionAliasExecuteCommand, self).__init__( 105 resource, 'execute', 106 ('Execute the command text by finding a matching %s.' % 107 resource.get_display_name().lower()), *args, **kwargs) 108 109 self.parser.add_argument('command_text', 110 metavar='command', 111 help=help) 112 self.parser.add_argument('-h', '--help', 113 action='store_true', dest='help', 114 help='Print usage for the given action.') 115 self.parser.add_argument('-u', '--user', type=str, default=None, 116 help='User under which to run the action (admins only).') 117 118 @resource.add_auth_token_to_kwargs_from_cli 119 def run(self, args, **kwargs): 120 payload = core.Resource() 121 payload.command = args.command_text 122 payload.user = args.user 123 payload.source_channel = 'cli' 124 125 alias_execution_mgr = self.app.client.managers['ActionAliasExecution'] 126 execution = alias_execution_mgr.match_and_execute(payload) 127 return execution 128 129 def run_and_print(self, args, **kwargs): 130 execution = self.run(args, **kwargs) 131 print("Matching Action-alias: '%s'" % execution.actionalias['ref']) 132 print("To get the results, execute:\n st2 execution get %s" % 133 (execution.execution['id'])) 134 [end of st2client/st2client/commands/action_alias.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py --- a/st2client/st2client/commands/action_alias.py +++ b/st2client/st2client/commands/action_alias.py @@ -41,10 +41,10 @@ self.commands['match'] = ActionAliasMatchCommand( self.resource, self.app, self.subparsers, - add_help=False) + add_help=True) self.commands['execute'] = ActionAliasExecuteCommand( self.resource, self.app, self.subparsers, - add_help=False) + add_help=True) class ActionAliasListCommand(resource.ContentPackResourceListCommand): @@ -69,10 +69,8 @@ self.parser.add_argument('match_text', metavar='command', - help=help) - self.parser.add_argument('-h', '--help', - action='store_true', dest='help', - help='Print usage for the given action.') + help=('Get the list of %s that match the command text.' % + resource.get_display_name().lower())) self.parser.add_argument('-a', '--attr', nargs='+', default=self.display_attributes, help=('List of attributes to include in the ' @@ -108,10 +106,8 @@ self.parser.add_argument('command_text', metavar='command', - help=help) - self.parser.add_argument('-h', '--help', - action='store_true', dest='help', - help='Print usage for the given action.') + help=('Execute the command text by finding a matching %s.' % + resource.get_display_name().lower())) self.parser.add_argument('-u', '--user', type=str, default=None, help='User under which to run the action (admins only).')
{"golden_diff": "diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py\n--- a/st2client/st2client/commands/action_alias.py\n+++ b/st2client/st2client/commands/action_alias.py\n@@ -41,10 +41,10 @@\n \n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n- add_help=False)\n+ add_help=True)\n \n \n class ActionAliasListCommand(resource.ContentPackResourceListCommand):\n@@ -69,10 +69,8 @@\n \n self.parser.add_argument('match_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Get the list of %s that match the command text.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n@@ -108,10 +106,8 @@\n \n self.parser.add_argument('command_text',\n metavar='command',\n- help=help)\n- self.parser.add_argument('-h', '--help',\n- action='store_true', dest='help',\n- help='Print usage for the given action.')\n+ help=('Execute the command text by finding a matching %s.' %\n+ resource.get_display_name().lower()))\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n", "issue": "Incomplete help for: st2 action-alias match\n```\r\n$ st2 action-alias match --help\r\nusage: st2 action-alias match [-t TOKEN] [--api-key API_KEY] [-j] [-y]\r\n [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]\r\n command\r\nst2 action-alias match: error: too few arguments\r\n```\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2client.models import core\nfrom st2client.models.action_alias import ActionAlias\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.commands import resource\nfrom st2client.formatters import table\n\n\n__all__ = [\n 'ActionAliasBranch',\n 'ActionAliasMatchCommand',\n 'ActionAliasExecuteCommand'\n]\n\n\nclass ActionAliasBranch(resource.ResourceBranch):\n def __init__(self, description, app, subparsers, parent_parser=None):\n super(ActionAliasBranch, self).__init__(\n ActionAlias, description, app, subparsers,\n parent_parser=parent_parser, read_only=False,\n commands={\n 'list': ActionAliasListCommand,\n 'get': ActionAliasGetCommand\n })\n\n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n add_help=False)\n\n\nclass ActionAliasListCommand(resource.ContentPackResourceListCommand):\n display_attributes = ['ref', 'pack', 'description', 'enabled']\n\n\nclass ActionAliasGetCommand(resource.ContentPackResourceGetCommand):\n display_attributes = ['all']\n attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',\n 'enabled', 'action_ref', 'formats']\n\n\nclass ActionAliasMatchCommand(resource.ResourceCommand):\n display_attributes = ['name', 'description']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n 'Get the list of %s that match the command text.' %\n resource.get_plural_display_name().lower(),\n *args, **kwargs)\n\n self.parser.add_argument('match_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n 'output. \"all\" will return all '\n 'attributes.'))\n self.parser.add_argument('-w', '--width', nargs='+', type=int,\n default=None,\n help=('Set the width of columns in output.'))\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n alias_match = ActionAliasMatch()\n alias_match.command = args.match_text\n\n match, _ = self.manager.match(alias_match, **kwargs)\n return [match]\n\n def run_and_print(self, args, **kwargs):\n instances = self.run(args, **kwargs)\n self.print_output(instances, table.MultiColumnTable,\n attributes=args.attr, widths=args.width,\n json=args.json, yaml=args.yaml)\n\n\nclass ActionAliasExecuteCommand(resource.ResourceCommand):\n display_attributes = ['name']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasExecuteCommand, self).__init__(\n resource, 'execute',\n ('Execute the command text by finding a matching %s.' 
%\n resource.get_display_name().lower()), *args, **kwargs)\n\n self.parser.add_argument('command_text',\n metavar='command',\n help=help)\n self.parser.add_argument('-h', '--help',\n action='store_true', dest='help',\n help='Print usage for the given action.')\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n payload = core.Resource()\n payload.command = args.command_text\n payload.user = args.user\n payload.source_channel = 'cli'\n\n alias_execution_mgr = self.app.client.managers['ActionAliasExecution']\n execution = alias_execution_mgr.match_and_execute(payload)\n return execution\n\n def run_and_print(self, args, **kwargs):\n execution = self.run(args, **kwargs)\n print(\"Matching Action-alias: '%s'\" % execution.actionalias['ref'])\n print(\"To get the results, execute:\\n st2 execution get %s\" %\n (execution.execution['id']))\n", "path": "st2client/st2client/commands/action_alias.py"}]}
2,037
407
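The golden diff above fixes two related argparse mistakes: the subcommands were registered with `add_help=False`, which suppressed the automatic `-h/--help` flag, and the positional argument was given `help=help`, that is, Python's builtin `help` function rather than a string. A self-contained sketch of the corrected pattern (the parser names here are illustrative, not the real st2 command classes):

```python
import argparse

# Top-level parser with a subcommand group, mirroring how st2 builds
# "st2 action-alias <subcommand>".
parser = argparse.ArgumentParser(prog="st2 action-alias")
subparsers = parser.add_subparsers(dest="subcommand")

# add_help=True (the default) keeps the automatic -h/--help flag working.
match = subparsers.add_parser("match", add_help=True)

# The positional argument gets a real description instead of the builtin
# `help` function, so `--help` can render something meaningful.
match.add_argument(
    "match_text",
    metavar="command",
    help="Get the list of action aliases that match the command text.",
)

if __name__ == "__main__":
    args = parser.parse_args(["match", "run date on localhost"])
    print(args.match_text)
```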
gh_patches_debug_20315
rasdani/github-patches
git_diff
Qiskit__qiskit-2302
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> multi-language documentation <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. --> ### What is the expected behavior? I would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way we do it and I let all discuss in that repository how we should do it. But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files) @alfrisch could you take the lead on the German @rraymondhp could you take the lead on the Japanese @hanheepaik could you take the lead on the Korean @liupibm could you take the lead on the Chinese Thanks </issue> <code> [start of docs/de/conf.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 """ 4 Language specific configuration file, inheriting from the main /doc 5 conf.py file and adjusting the variables that depend on the language. 6 """ 7 8 import os 9 import sys 10 11 sys.path.insert(0, os.path.abspath('..')) 12 from conf import * 13 14 language = 'de' 15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst'] 16 17 templates_path = ['../_templates'] 18 html_static_path = ['../theme/static/'] 19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif' 20 html_favicon = '../theme/static/favicon.ico' 21 [end of docs/de/conf.py] [start of docs/ja/conf.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 """ 4 Language specific configuration file, inheriting from the main /doc 5 conf.py file and adjusting the variables that depend on the language. 6 """ 7 8 import os 9 import sys 10 11 sys.path.insert(0, os.path.abspath('..')) 12 from conf import * 13 14 language = 'ja' 15 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst'] 16 17 templates_path = ['../_templates'] 18 html_static_path = ['../theme/static/'] 19 html_logo = '../theme/static/qiskit-logo-white-no-margin.gif' 20 html_favicon = '../theme/static/favicon.ico' 21 [end of docs/ja/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/de/conf.py b/docs/de/conf.py deleted file mode 100644 --- a/docs/de/conf.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Language specific configuration file, inheriting from the main /doc -conf.py file and adjusting the variables that depend on the language. -""" - -import os -import sys - -sys.path.insert(0, os.path.abspath('..')) -from conf import * - -language = 'de' -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst'] - -templates_path = ['../_templates'] -html_static_path = ['../theme/static/'] -html_logo = '../theme/static/qiskit-logo-white-no-margin.gif' -html_favicon = '../theme/static/favicon.ico' diff --git a/docs/ja/conf.py b/docs/ja/conf.py deleted file mode 100644 --- a/docs/ja/conf.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Language specific configuration file, inheriting from the main /doc -conf.py file and adjusting the variables that depend on the language. -""" - -import os -import sys - -sys.path.insert(0, os.path.abspath('..')) -from conf import * - -language = 'ja' -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst'] - -templates_path = ['../_templates'] -html_static_path = ['../theme/static/'] -html_logo = '../theme/static/qiskit-logo-white-no-margin.gif' -html_favicon = '../theme/static/favicon.ico'
{"golden_diff": "diff --git a/docs/de/conf.py b/docs/de/conf.py\ndeleted file mode 100644\n--- a/docs/de/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'de'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\ndiff --git a/docs/ja/conf.py b/docs/ja/conf.py\ndeleted file mode 100644\n--- a/docs/ja/conf.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#!/usr/bin/env python3\n-# -*- coding: utf-8 -*-\n-\"\"\"\n-Language specific configuration file, inheriting from the main /doc\n-conf.py file and adjusting the variables that depend on the language.\n-\"\"\"\n-\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath('..'))\n-from conf import *\n-\n-language = 'ja'\n-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n-\n-templates_path = ['../_templates']\n-html_static_path = ['../theme/static/']\n-html_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\n-html_favicon = '../theme/static/favicon.ico'\n", "issue": "multi-language documentation \n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected behavior?\r\nI would like to remove all the multi-language documentation from this repository and add it to github.com/Qiskit/qiskit. I am not sure of the best way we do it and I let all discuss in that repository how we should do it. 
But we are not going to have documentation in the individual elements (only a readme about the element, contributing guidelines and other community files)\r\n\r\n@alfrisch could you take the lead on the German\r\n@rraymondhp could you take the lead on the Japanese\r\n@hanheepaik could you take the lead on the Korean\r\n@liupibm could you take the lead on the Chinese\r\n\r\nThanks \r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'de'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/de/conf.py"}, {"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLanguage specific configuration file, inheriting from the main /doc\nconf.py file and adjusting the variables that depend on the language.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\nfrom conf import *\n\nlanguage = 'ja'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_autodoc/modules.rst']\n\ntemplates_path = ['../_templates']\nhtml_static_path = ['../theme/static/']\nhtml_logo = '../theme/static/qiskit-logo-white-no-margin.gif'\nhtml_favicon = '../theme/static/favicon.ico'\n", "path": "docs/ja/conf.py"}]}
1,091
396
gh_patches_debug_43406
rasdani/github-patches
git_diff
sublimelsp__LSP-707
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reduce impact of LSP color listener by default. @predragnikolic: Noticed while closing views after find-replace in pyls: ``` Traceback (most recent call last): File "/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py", line 506, in run_async_view_listener_callback vel.__class__.__dict__[name](vel) File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 47, in on_activated_async self.schedule_request() File "/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py", line 53, in schedule_request current_point = self.view.sel()[0].begin() File "/Applications/Sublime Text.app/Contents/MacOS/sublime.py", line 649, in __getitem__ raise IndexError() IndexError ``` * We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly) * We could avoid creating ViewEventListeners for a few kinds of irrelevant views: Read-only, transient, unsupported syntaxes ( Maybe `DocumentSyncListener` `is_applicable` is suitable?) * We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found. </issue> <code> [start of plugin/color.py] 1 import sublime_plugin 2 import sublime 3 4 try: 5 from typing import Any, List, Dict, Callable, Optional 6 assert Any and List and Dict and Callable and Optional 7 except ImportError: 8 pass 9 10 from .core.protocol import Request 11 from .core.url import filename_to_uri 12 from .core.registry import session_for_view 13 from .core.settings import settings 14 from .core.views import range_to_region 15 from .core.protocol import Range 16 17 18 def send_color_request(view, on_response_recieved: 'Callable'): 19 session = session_for_view(view) 20 if not session or not session.has_capability('colorProvider'): 21 # the server doesn't support colors, just return 22 return 23 24 params = { 25 "textDocument": { 26 "uri": filename_to_uri(view.file_name()) 27 } 28 } 29 session.client.send_request( 30 Request.documentColor(params), 31 lambda response: on_response_recieved(response)) 32 33 34 class LspColorListener(sublime_plugin.ViewEventListener): 35 def __init__(self, view: sublime.View) -> None: 36 super().__init__(view) 37 self.color_phantom_set = None # type: Optional[sublime.PhantomSet] 38 self._stored_point = -1 39 40 @classmethod 41 def is_applicable(cls, _settings): 42 return 'colorProvider' not in settings.disabled_capabilities 43 44 def on_activated_async(self): 45 self.schedule_request() 46 47 def on_modified_async(self): 48 self.schedule_request() 49 50 def schedule_request(self): 51 current_point = self.view.sel()[0].begin() 52 if self._stored_point != current_point: 53 self._stored_point = current_point 54 sublime.set_timeout_async(lambda: self.fire_request(current_point), 800) 55 56 def fire_request(self, current_point: int) -> None: 57 if current_point == self._stored_point: 58 send_color_request(self.view, self.handle_response) 59 60 def handle_response(self, response) -> None: 61 phantoms = [] 62 for val in response: 63 color = val['color'] 64 red = color['red'] * 255 65 green = color['green'] * 255 66 blue = color['blue'] * 255 67 alpha = color['alpha'] 68 69 content = """ 70 <div style='padding: 0.4em; 71 margin-top: 0.1em; 72 border: 1px solid color(var(--foreground) alpha(0.25)); 73 background-color: rgba({}, {}, {}, {})'> 74 </div>""".format(red, green, blue, alpha) 75 76 range 
= Range.from_lsp(val['range']) 77 region = range_to_region(range, self.view) 78 79 phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE)) 80 81 if phantoms: 82 if not self.color_phantom_set: 83 self.color_phantom_set = sublime.PhantomSet(self.view, "lsp_color") 84 self.color_phantom_set.update(phantoms) 85 else: 86 self.color_phantom_set = None 87 88 89 def remove_color_boxes(view): 90 view.erase_phantoms('lsp_color') 91 [end of plugin/color.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/color.py b/plugin/color.py --- a/plugin/color.py +++ b/plugin/color.py @@ -9,26 +9,12 @@ from .core.protocol import Request from .core.url import filename_to_uri -from .core.registry import session_for_view -from .core.settings import settings +from .core.registry import session_for_view, config_for_scope +from .core.settings import settings, client_configs from .core.views import range_to_region from .core.protocol import Range - - -def send_color_request(view, on_response_recieved: 'Callable'): - session = session_for_view(view) - if not session or not session.has_capability('colorProvider'): - # the server doesn't support colors, just return - return - - params = { - "textDocument": { - "uri": filename_to_uri(view.file_name()) - } - } - session.client.send_request( - Request.documentColor(params), - lambda response: on_response_recieved(response)) +from .core.configurations import is_supported_syntax +from .core.documents import is_transient_view class LspColorListener(sublime_plugin.ViewEventListener): @@ -36,26 +22,72 @@ super().__init__(view) self.color_phantom_set = None # type: Optional[sublime.PhantomSet] self._stored_point = -1 + self.initialized = False + self.enabled = False @classmethod def is_applicable(cls, _settings): - return 'colorProvider' not in settings.disabled_capabilities + syntax = _settings.get('syntax') + is_supported = syntax and is_supported_syntax(syntax, client_configs.all) + disabled_by_user = 'colorProvider' in settings.disabled_capabilities + return is_supported and not disabled_by_user def on_activated_async(self): - self.schedule_request() + if not self.initialized: + self.initialize() + + def initialize(self, is_retry=False): + config = config_for_scope(self.view) + if not config: + self.initialized = True # no server enabled, re-open file to activate feature. + + session = session_for_view(self.view) + if session: + self.initialized = True + self.enabled = session.has_capability('colorProvider') + if self.enabled: + self.send_color_request() + elif not is_retry: + # session may be starting, try again once in a second. + sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000) + else: + self.initialized = True # we retried but still no session available. def on_modified_async(self): - self.schedule_request() + if self.enabled: + self.schedule_request() def schedule_request(self): - current_point = self.view.sel()[0].begin() + sel = self.view.sel() + if len(sel) < 1: + return + + current_point = sel[0].begin() if self._stored_point != current_point: self._stored_point = current_point sublime.set_timeout_async(lambda: self.fire_request(current_point), 800) def fire_request(self, current_point: int) -> None: if current_point == self._stored_point: - send_color_request(self.view, self.handle_response) + self.send_color_request() + + def send_color_request(self): + if is_transient_view(self.view): + return + + session = session_for_view(self.view) + if not session: + return + + params = { + "textDocument": { + "uri": filename_to_uri(self.view.file_name()) + } + } + session.client.send_request( + Request.documentColor(params), + self.handle_response + ) def handle_response(self, response) -> None: phantoms = [] @@ -68,7 +100,7 @@ content = """ <div style='padding: 0.4em; - margin-top: 0.1em; + margin-top: 0.2em; border: 1px solid color(var(--foreground) alpha(0.25)); background-color: rgba({}, {}, {}, {})'> </div>""".format(red, green, blue, alpha)
{"golden_diff": "diff --git a/plugin/color.py b/plugin/color.py\n--- a/plugin/color.py\n+++ b/plugin/color.py\n@@ -9,26 +9,12 @@\n \n from .core.protocol import Request\n from .core.url import filename_to_uri\n-from .core.registry import session_for_view\n-from .core.settings import settings\n+from .core.registry import session_for_view, config_for_scope\n+from .core.settings import settings, client_configs\n from .core.views import range_to_region\n from .core.protocol import Range\n-\n-\n-def send_color_request(view, on_response_recieved: 'Callable'):\n- session = session_for_view(view)\n- if not session or not session.has_capability('colorProvider'):\n- # the server doesn't support colors, just return\n- return\n-\n- params = {\n- \"textDocument\": {\n- \"uri\": filename_to_uri(view.file_name())\n- }\n- }\n- session.client.send_request(\n- Request.documentColor(params),\n- lambda response: on_response_recieved(response))\n+from .core.configurations import is_supported_syntax\n+from .core.documents import is_transient_view\n \n \n class LspColorListener(sublime_plugin.ViewEventListener):\n@@ -36,26 +22,72 @@\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n+ self.initialized = False\n+ self.enabled = False\n \n @classmethod\n def is_applicable(cls, _settings):\n- return 'colorProvider' not in settings.disabled_capabilities\n+ syntax = _settings.get('syntax')\n+ is_supported = syntax and is_supported_syntax(syntax, client_configs.all)\n+ disabled_by_user = 'colorProvider' in settings.disabled_capabilities\n+ return is_supported and not disabled_by_user\n \n def on_activated_async(self):\n- self.schedule_request()\n+ if not self.initialized:\n+ self.initialize()\n+\n+ def initialize(self, is_retry=False):\n+ config = config_for_scope(self.view)\n+ if not config:\n+ self.initialized = True # no server enabled, re-open file to activate feature.\n+\n+ session = session_for_view(self.view)\n+ if session:\n+ self.initialized = True\n+ self.enabled = session.has_capability('colorProvider')\n+ if self.enabled:\n+ self.send_color_request()\n+ elif not is_retry:\n+ # session may be starting, try again once in a second.\n+ sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)\n+ else:\n+ self.initialized = True # we retried but still no session available.\n \n def on_modified_async(self):\n- self.schedule_request()\n+ if self.enabled:\n+ self.schedule_request()\n \n def schedule_request(self):\n- current_point = self.view.sel()[0].begin()\n+ sel = self.view.sel()\n+ if len(sel) < 1:\n+ return\n+\n+ current_point = sel[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n \n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n- send_color_request(self.view, self.handle_response)\n+ self.send_color_request()\n+\n+ def send_color_request(self):\n+ if is_transient_view(self.view):\n+ return\n+\n+ session = session_for_view(self.view)\n+ if not session:\n+ return\n+\n+ params = {\n+ \"textDocument\": {\n+ \"uri\": filename_to_uri(self.view.file_name())\n+ }\n+ }\n+ session.client.send_request(\n+ Request.documentColor(params),\n+ self.handle_response\n+ )\n \n def handle_response(self, response) -> None:\n phantoms = []\n@@ -68,7 +100,7 @@\n \n content = \"\"\"\n <div style='padding: 0.4em;\n- margin-top: 0.1em;\n+ margin-top: 0.2em;\n border: 1px solid 
color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, green, blue, alpha)\n", "issue": "Reduce impact of LSP color listener by default.\n@predragnikolic: Noticed while closing views after find-replace in pyls:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py\", line 506, in run_async_view_listener_callback\r\n vel.__class__.__dict__[name](vel)\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 47, in on_activated_async\r\n self.schedule_request()\r\n File \"/Users/tomv/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/color.py\", line 53, in schedule_request\r\n current_point = self.view.sel()[0].begin()\r\n File \"/Applications/Sublime Text.app/Contents/MacOS/sublime.py\", line 649, in __getitem__\r\n raise IndexError()\r\nIndexError\r\n```\r\n\r\n* We should probably fix the above issue (maybe it's rare but I ran into this edge case pretty quickly)\r\n* We could avoid creating ViewEventListeners for a few kinds of irrelevant views: Read-only, transient, unsupported syntaxes ( Maybe `DocumentSyncListener` `is_applicable` is suitable?)\r\n* We could avoid running `LspColorListener`'s `schedule_request` until a session with colorProvider for a given view has been found.\r\n\n", "before_files": [{"content": "import sublime_plugin\nimport sublime\n\ntry:\n from typing import Any, List, Dict, Callable, Optional\n assert Any and List and Dict and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.protocol import Request\nfrom .core.url import filename_to_uri\nfrom .core.registry import session_for_view\nfrom .core.settings import settings\nfrom .core.views import range_to_region\nfrom .core.protocol import Range\n\n\ndef send_color_request(view, on_response_recieved: 'Callable'):\n session = session_for_view(view)\n if not session or not session.has_capability('colorProvider'):\n # the server doesn't support colors, just return\n return\n\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(view.file_name())\n }\n }\n session.client.send_request(\n Request.documentColor(params),\n lambda response: on_response_recieved(response))\n\n\nclass LspColorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.color_phantom_set = None # type: Optional[sublime.PhantomSet]\n self._stored_point = -1\n\n @classmethod\n def is_applicable(cls, _settings):\n return 'colorProvider' not in settings.disabled_capabilities\n\n def on_activated_async(self):\n self.schedule_request()\n\n def on_modified_async(self):\n self.schedule_request()\n\n def schedule_request(self):\n current_point = self.view.sel()[0].begin()\n if self._stored_point != current_point:\n self._stored_point = current_point\n sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)\n\n def fire_request(self, current_point: int) -> None:\n if current_point == self._stored_point:\n send_color_request(self.view, self.handle_response)\n\n def handle_response(self, response) -> None:\n phantoms = []\n for val in response:\n color = val['color']\n red = color['red'] * 255\n green = color['green'] * 255\n blue = color['blue'] * 255\n alpha = color['alpha']\n\n content = \"\"\"\n <div style='padding: 0.4em;\n margin-top: 0.1em;\n border: 1px solid color(var(--foreground) alpha(0.25));\n background-color: rgba({}, {}, {}, {})'>\n </div>\"\"\".format(red, 
green, blue, alpha)\n\n range = Range.from_lsp(val['range'])\n region = range_to_region(range, self.view)\n\n phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))\n\n if phantoms:\n if not self.color_phantom_set:\n self.color_phantom_set = sublime.PhantomSet(self.view, \"lsp_color\")\n self.color_phantom_set.update(phantoms)\n else:\n self.color_phantom_set = None\n\n\ndef remove_color_boxes(view):\n view.erase_phantoms('lsp_color')\n", "path": "plugin/color.py"}]}
1,668
960
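Both failure modes addressed in the diff above follow a general shape: guard the empty-selection case instead of indexing `view.sel()[0]` unconditionally, and defer the feature until a session actually advertises `colorProvider`, retrying once while the server starts. A plain-Python sketch of that shape; the `Session` and listener classes below are hypothetical stand-ins for Sublime's API objects:

```python
from typing import Optional


class Session:
    """Hypothetical stand-in for an LSP session object."""

    def __init__(self, capabilities: set):
        self.capabilities = capabilities

    def has_capability(self, name: str) -> bool:
        return name in self.capabilities


class ColorFeature:
    def __init__(self, get_session):
        self._get_session = get_session  # callable returning a Session or None
        self.initialized = False
        self.enabled = False

    def on_activated(self, retry_cb=None) -> None:
        # Initialize lazily: only mark enabled once a live session reports
        # the colorProvider capability.
        session = self._get_session()
        if session is not None:
            self.initialized = True
            self.enabled = session.has_capability("colorProvider")
        elif retry_cb is not None:
            retry_cb()  # e.g. re-run once after a delay while the server starts
        else:
            self.initialized = True  # retried already; give up quietly

    def schedule_request(self, selection) -> Optional[int]:
        # Guard the empty-selection case instead of indexing selection[0]
        # unconditionally, which raised the IndexError in the traceback above.
        if not self.enabled or len(selection) < 1:
            return None
        return selection[0]


if __name__ == "__main__":
    feature = ColorFeature(lambda: Session({"colorProvider"}))
    feature.on_activated()
    print(feature.schedule_request([]))    # None: empty selection handled
    print(feature.schedule_request([42]))  # 42
```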
gh_patches_debug_21965
rasdani/github-patches
git_diff
apache__tvm-10188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option Now we have RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, then we can put RPC proxy between host machine and RPC server like "Host --> RPC Proxy --> RPC Server", we can do it from Python API, but we can't do it through command line. Because "tvm.exec.rpc_server" haven't exposed the parameter "is_proxy" of class "tvm.rpc.server.Server" through command line option. ### Expected behavior Can register the RPC server to RPC proxy from command line like something below. ```shell python -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy ``` ### Actual behavior Currently haven't any command line option to set the parameter "is_proxy". ### Environment No ### Steps to reproduce Any </issue> <code> [start of python/tvm/exec/rpc_server.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 # pylint: disable=redefined-outer-name, invalid-name 18 """Start an RPC server""" 19 import argparse 20 import logging 21 from .. import rpc 22 23 24 def main(args): 25 """Main function 26 27 Parameters 28 ---------- 29 args : argparse.Namespace 30 parsed args from command-line invocation 31 """ 32 if args.tracker: 33 url, port = args.tracker.rsplit(":", 1) 34 port = int(port) 35 tracker_addr = (url, port) 36 if not args.key: 37 raise RuntimeError("Need key to present type of resource when tracker is available") 38 else: 39 tracker_addr = None 40 41 server = rpc.Server( 42 args.host, 43 args.port, 44 args.port_end, 45 key=args.key, 46 tracker_addr=tracker_addr, 47 load_library=args.load_library, 48 custom_addr=args.custom_addr, 49 silent=args.silent, 50 no_fork=not args.fork, 51 ) 52 server.proc.join() 53 54 55 if __name__ == "__main__": 56 parser = argparse.ArgumentParser() 57 parser.add_argument( 58 "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to" 59 ) 60 parser.add_argument("--port", type=int, default=9090, help="The port of the RPC") 61 parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC") 62 parser.add_argument( 63 "--tracker", 64 type=str, 65 help=("The address of RPC tracker in host:port format. " "e.g. (10.77.1.234:9190)"), 66 ) 67 parser.add_argument( 68 "--key", type=str, default="", help="The key used to identify the device type in tracker." 
69 ) 70 parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.") 71 parser.add_argument("--load-library", type=str, help="Additional library to load") 72 parser.add_argument( 73 "--no-fork", 74 dest="fork", 75 action="store_false", 76 help="Use spawn mode to avoid fork. This option \ 77 is able to avoid potential fork problems with Metal, OpenCL \ 78 and ROCM compilers.", 79 ) 80 parser.add_argument( 81 "--custom-addr", type=str, help="Custom IP Address to Report to RPC Tracker" 82 ) 83 84 parser.set_defaults(fork=True) 85 args = parser.parse_args() 86 logging.basicConfig(level=logging.INFO) 87 if not args.fork is False and not args.silent: 88 logging.info( 89 "If you are running ROCM/Metal, fork will cause " 90 "compiler internal error. Try to launch with arg ```--no-fork```" 91 ) 92 main(args) 93 [end of python/tvm/exec/rpc_server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py --- a/python/tvm/exec/rpc_server.py +++ b/python/tvm/exec/rpc_server.py @@ -42,6 +42,7 @@ args.host, args.port, args.port_end, + is_proxy=args.through_proxy, key=args.key, tracker_addr=tracker_addr, load_library=args.load_library, @@ -58,6 +59,15 @@ "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to" ) parser.add_argument("--port", type=int, default=9090, help="The port of the RPC") + parser.add_argument( + "--through-proxy", + dest="through_proxy", + action="store_true", + help=( + "Whether this server provide service through a proxy. If this is true, the host and" + "port actually is the address of the proxy." + ), + ) parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC") parser.add_argument( "--tracker",
{"golden_diff": "diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py\n--- a/python/tvm/exec/rpc_server.py\n+++ b/python/tvm/exec/rpc_server.py\n@@ -42,6 +42,7 @@\n args.host,\n args.port,\n args.port_end,\n+ is_proxy=args.through_proxy,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n@@ -58,6 +59,15 @@\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n+ parser.add_argument(\n+ \"--through-proxy\",\n+ dest=\"through_proxy\",\n+ action=\"store_true\",\n+ help=(\n+ \"Whether this server provide service through a proxy. If this is true, the host and\"\n+ \"port actually is the address of the proxy.\"\n+ ),\n+ )\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n", "issue": "[Bug] RPC Server Can't Serve Through Proxy Because of Missing Command Line Option\nNow we have RPC server, proxy, and tracker, so if the host machine can't access the RPC server directly, then we can put RPC proxy between host machine and RPC server like \"Host --> RPC Proxy --> RPC Server\", we can do it from Python API, but we can't do it through command line. Because \"tvm.exec.rpc_server\" haven't exposed the parameter \"is_proxy\" of class \"tvm.rpc.server.Server\" through command line option.\r\n\r\n### Expected behavior\r\n\r\nCan register the RPC server to RPC proxy from command line like something below.\r\n```shell\r\npython -m tvm.exec.rpc_server --host 0.0.0.0 --port 9090 --through-proxy\r\n```\r\n\r\n### Actual behavior\r\n\r\nCurrently haven't any command line option to set the parameter \"is_proxy\".\r\n\r\n### Environment\r\n\r\nNo\r\n\r\n### Steps to reproduce\r\n\r\nAny\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=redefined-outer-name, invalid-name\n\"\"\"Start an RPC server\"\"\"\nimport argparse\nimport logging\nfrom .. 
import rpc\n\n\ndef main(args):\n \"\"\"Main function\n\n Parameters\n ----------\n args : argparse.Namespace\n parsed args from command-line invocation\n \"\"\"\n if args.tracker:\n url, port = args.tracker.rsplit(\":\", 1)\n port = int(port)\n tracker_addr = (url, port)\n if not args.key:\n raise RuntimeError(\"Need key to present type of resource when tracker is available\")\n else:\n tracker_addr = None\n\n server = rpc.Server(\n args.host,\n args.port,\n args.port_end,\n key=args.key,\n tracker_addr=tracker_addr,\n load_library=args.load_library,\n custom_addr=args.custom_addr,\n silent=args.silent,\n no_fork=not args.fork,\n )\n server.proc.join()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--host\", type=str, default=\"0.0.0.0\", help=\"The host IP address the tracker binds to\"\n )\n parser.add_argument(\"--port\", type=int, default=9090, help=\"The port of the RPC\")\n parser.add_argument(\"--port-end\", type=int, default=9199, help=\"The end search port of the RPC\")\n parser.add_argument(\n \"--tracker\",\n type=str,\n help=(\"The address of RPC tracker in host:port format. \" \"e.g. (10.77.1.234:9190)\"),\n )\n parser.add_argument(\n \"--key\", type=str, default=\"\", help=\"The key used to identify the device type in tracker.\"\n )\n parser.add_argument(\"--silent\", action=\"store_true\", help=\"Whether run in silent mode.\")\n parser.add_argument(\"--load-library\", type=str, help=\"Additional library to load\")\n parser.add_argument(\n \"--no-fork\",\n dest=\"fork\",\n action=\"store_false\",\n help=\"Use spawn mode to avoid fork. This option \\\n is able to avoid potential fork problems with Metal, OpenCL \\\n and ROCM compilers.\",\n )\n parser.add_argument(\n \"--custom-addr\", type=str, help=\"Custom IP Address to Report to RPC Tracker\"\n )\n\n parser.set_defaults(fork=True)\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n if not args.fork is False and not args.silent:\n logging.info(\n \"If you are running ROCM/Metal, fork will cause \"\n \"compiler internal error. Try to launch with arg ```--no-fork```\"\n )\n main(args)\n", "path": "python/tvm/exec/rpc_server.py"}]}
1,677
272
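The fix above is a small but common pattern: expose a constructor keyword that the Python API already supports (`is_proxy`) as a boolean command-line switch. A stripped-down sketch of the wiring, with a dummy class standing in for `tvm.rpc.Server`:

```python
import argparse


class Server:
    """Dummy stand-in for tvm.rpc.Server, just to show the wiring."""

    def __init__(self, host, port, is_proxy=False):
        mode = "registering through proxy at" if is_proxy else "listening on"
        print(f"RPC server {mode} {host}:{port}")


def main(args):
    # The CLI flag flows straight into the keyword argument the Python API
    # already supported; when set, host/port name the proxy, not the server.
    Server(args.host, args.port, is_proxy=args.through_proxy)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--port", type=int, default=9090)
    parser.add_argument(
        "--through-proxy",
        dest="through_proxy",
        action="store_true",
        help="Serve through an RPC proxy; host/port are the proxy's address.",
    )
    main(parser.parse_args())
```

Invoked the way the issue's expected command shows (`--host 0.0.0.0 --port 9090 --through-proxy`), the flag flips the server into proxy-registration mode without any change to the underlying API.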
gh_patches_debug_5099
rasdani/github-patches
git_diff
translate__pootle-6747
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add current character count when entering translations We're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters. When entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line. # Environment (i.e. 'pootle --version', DB, OS, Browser): Pootle 2.8.0 </issue> <code> [start of pootle/core/templatetags/core.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from django import template 11 from django.utils.html import escapejs 12 from django.utils.safestring import mark_safe 13 14 from ..utils.json import jsonify 15 16 17 register = template.Library() 18 19 20 @register.filter 21 def to_js(value): 22 """Returns a string which leaves the value readily available for JS 23 consumption. 24 """ 25 return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value))) 26 27 28 @register.inclusion_tag('includes/formtable.html') 29 def formtable(formtable): 30 return dict(formtable=formtable) 31 [end of pootle/core/templatetags/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py --- a/pootle/core/templatetags/core.py +++ b/pootle/core/templatetags/core.py @@ -25,6 +25,13 @@ return mark_safe('JSON.parse("%s")' % escapejs(jsonify(value))) [email protected] +def map_to_lengths(value): + """Maps a list value by replacing each element with its length. + """ + return [len(e) for e in value] + + @register.inclusion_tag('includes/formtable.html') def formtable(formtable): return dict(formtable=formtable)
{"golden_diff": "diff --git a/pootle/core/templatetags/core.py b/pootle/core/templatetags/core.py\n--- a/pootle/core/templatetags/core.py\n+++ b/pootle/core/templatetags/core.py\n@@ -25,6 +25,13 @@\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n \n \[email protected]\n+def map_to_lengths(value):\n+ \"\"\"Maps a list value by replacing each element with its length.\n+ \"\"\"\n+ return [len(e) for e in value]\n+\n+\n @register.inclusion_tag('includes/formtable.html')\n def formtable(formtable):\n return dict(formtable=formtable)\n", "issue": "Add current character count when entering translations\nWe're trying to use Pootle to translate metadata strings for an app in the iOS AppStore. The metadata includes individual messages for the app name, subtitle, description, privacy URL and so on and there are different limits on the number of characters allowed in each of them. For instance, an app's name can be no more than 30 characters. \r\n\r\nWhen entering translations, it would be really helpful to see the current number of characters that the translation uses as you type to ensure that you're not exceeding the limit. This could maybe fit on the lower right corner of the input view. You currently have timeline / comment / raw on the left. Current character count could just be a small label that floats to the right on the same line.\r\n\r\n# Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\nPootle 2.8.0\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.html import escapejs\nfrom django.utils.safestring import mark_safe\n\nfrom ..utils.json import jsonify\n\n\nregister = template.Library()\n\n\[email protected]\ndef to_js(value):\n \"\"\"Returns a string which leaves the value readily available for JS\n consumption.\n \"\"\"\n return mark_safe('JSON.parse(\"%s\")' % escapejs(jsonify(value)))\n\n\[email protected]_tag('includes/formtable.html')\ndef formtable(formtable):\n return dict(formtable=formtable)\n", "path": "pootle/core/templatetags/core.py"}]}
980
158
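The `map_to_lengths` filter added by the diff above is a one-liner, but the point is how it surfaces per-string character counts to a template. A sketch assuming a standard Django template library is available; the template snippet in the docstring is illustrative and not taken from Pootle's actual editor markup:

```python
from django import template

register = template.Library()


@register.filter
def map_to_lengths(value):
    """Map a list of strings to their character counts.

    In a template this lets the editor show how long each target string
    is, e.g. for stores with per-field character limits:

        {% for length in unit_targets|map_to_lengths %}
            <span class="char-count">{{ length }}</span>
        {% endfor %}

    (``unit_targets`` here is a hypothetical context variable.)
    """
    return [len(e) for e in value]
```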
gh_patches_debug_27424
rasdani/github-patches
git_diff
learningequality__kolibri-2117
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> can no longer sign in using a pre-existing superuser account after upgrade It appears that my superuser account is no longer available since upgrading to the latest develop. I would have expected it to get migrated to an admin account with superuser flags enabled. It actually looks like the user might still be there: ![image](https://user-images.githubusercontent.com/2367265/29852726-81f2b1b4-8cf0-11e7-9b0a-adb9fb0cb264.png) </issue> <code> [start of kolibri/auth/migrations/0004_auto_20170816_1607.py] 1 # -*- coding: utf-8 -*- 2 # Generated by Django 1.9.7 on 2017-08-16 23:07 3 from __future__ import unicode_literals 4 5 import django.core.validators 6 from django.db import migrations, models 7 from kolibri.auth.constants.role_kinds import ADMIN 8 9 10 def device_owner_to_super_user(apps, schema_editor): 11 DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner') 12 FacilityUser = apps.get_model('kolibriauth', 'FacilityUser') 13 Facility = apps.get_model('kolibriauth', 'Facility') 14 default_facility = Facility.objects.all().first() 15 DevicePermissions = apps.get_model('device', 'DevicePermissions') 16 DeviceSettings = apps.get_model('device', 'DeviceSettings') 17 Role = apps.get_model('kolibriauth', 'Role') 18 from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole 19 real_default_facility = RealFacility.get_default_facility() 20 # Can't do much if no facilities exist, as no facility to FK the users onto 21 if default_facility: 22 for device_owner in DeviceOwner.objects.all(): 23 dataset_id = real_default_facility.dataset_id 24 real_superuser = RealFacilityUser( 25 username=device_owner.username, 26 facility=real_default_facility, 27 dataset_id=dataset_id 28 ) 29 uuid = real_superuser.calculate_uuid() 30 superuser = FacilityUser.objects.create( 31 username=device_owner.username, 32 password=device_owner.password, 33 facility=default_facility, 34 full_name=device_owner.full_name, 35 date_joined=device_owner.date_joined, 36 id=uuid, 37 dataset_id=dataset_id, 38 _morango_source_id=real_superuser._morango_source_id, 39 _morango_partition=real_superuser._morango_partition, 40 ) 41 real_role = RealRole( 42 user=real_superuser, 43 collection=real_default_facility, 44 kind=ADMIN, 45 dataset_id=dataset_id, 46 ) 47 role_uuid = real_role.calculate_uuid() 48 role = Role.objects.create( 49 user=superuser, 50 collection=default_facility, 51 kind=ADMIN, 52 id=role_uuid, 53 dataset_id=dataset_id, 54 _morango_source_id=real_role._morango_source_id, 55 _morango_partition=real_role._morango_partition, 56 ) 57 DevicePermissions.objects.create(user=superuser, is_superuser=True) 58 # Finally, set the is_provisioned flag 59 settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True) 60 61 62 class Migration(migrations.Migration): 63 64 dependencies = [ 65 ('kolibriauth', '0003_auto_20170621_0958'), 66 ('device', '0001_initial') 67 ] 68 69 operations = [ 70 migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop), 71 migrations.DeleteModel( 72 name='DeviceOwner', 73 ), 74 ] 75 [end of kolibri/auth/migrations/0004_auto_20170816_1607.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py --- a/kolibri/auth/migrations/0004_auto_20170816_1607.py +++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py @@ -8,17 +8,17 @@ def device_owner_to_super_user(apps, schema_editor): - DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner') - FacilityUser = apps.get_model('kolibriauth', 'FacilityUser') - Facility = apps.get_model('kolibriauth', 'Facility') - default_facility = Facility.objects.all().first() - DevicePermissions = apps.get_model('device', 'DevicePermissions') - DeviceSettings = apps.get_model('device', 'DeviceSettings') - Role = apps.get_model('kolibriauth', 'Role') from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole real_default_facility = RealFacility.get_default_facility() # Can't do much if no facilities exist, as no facility to FK the users onto - if default_facility: + if real_default_facility: + DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner') + FacilityUser = apps.get_model('kolibriauth', 'FacilityUser') + Facility = apps.get_model('kolibriauth', 'Facility') + default_facility = Facility.objects.get(pk=real_default_facility.id) + DevicePermissions = apps.get_model('device', 'DevicePermissions') + DeviceSettings = apps.get_model('device', 'DeviceSettings') + Role = apps.get_model('kolibriauth', 'Role') for device_owner in DeviceOwner.objects.all(): dataset_id = real_default_facility.dataset_id real_superuser = RealFacilityUser(
{"golden_diff": "diff --git a/kolibri/auth/migrations/0004_auto_20170816_1607.py b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n--- a/kolibri/auth/migrations/0004_auto_20170816_1607.py\n+++ b/kolibri/auth/migrations/0004_auto_20170816_1607.py\n@@ -8,17 +8,17 @@\n \n \n def device_owner_to_super_user(apps, schema_editor):\n- DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n- FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n- Facility = apps.get_model('kolibriauth', 'Facility')\n- default_facility = Facility.objects.all().first()\n- DevicePermissions = apps.get_model('device', 'DevicePermissions')\n- DeviceSettings = apps.get_model('device', 'DeviceSettings')\n- Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n- if default_facility:\n+ if real_default_facility:\n+ DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n+ FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n+ Facility = apps.get_model('kolibriauth', 'Facility')\n+ default_facility = Facility.objects.get(pk=real_default_facility.id)\n+ DevicePermissions = apps.get_model('device', 'DevicePermissions')\n+ DeviceSettings = apps.get_model('device', 'DeviceSettings')\n+ Role = apps.get_model('kolibriauth', 'Role')\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n", "issue": "can no longer sign in using a pre-existing superuser account after upgrade\nIt appears that my superuser account is no longer available since upgrading to the latest develop. 
\r\n\r\nI would have expected it to get migrated to an admin account with superuser flags enabled.\r\n\r\nIt actually looks like the user might still be there:\r\n\r\n![image](https://user-images.githubusercontent.com/2367265/29852726-81f2b1b4-8cf0-11e7-9b0a-adb9fb0cb264.png)\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-08-16 23:07\nfrom __future__ import unicode_literals\n\nimport django.core.validators\nfrom django.db import migrations, models\nfrom kolibri.auth.constants.role_kinds import ADMIN\n\n\ndef device_owner_to_super_user(apps, schema_editor):\n DeviceOwner = apps.get_model('kolibriauth', 'DeviceOwner')\n FacilityUser = apps.get_model('kolibriauth', 'FacilityUser')\n Facility = apps.get_model('kolibriauth', 'Facility')\n default_facility = Facility.objects.all().first()\n DevicePermissions = apps.get_model('device', 'DevicePermissions')\n DeviceSettings = apps.get_model('device', 'DeviceSettings')\n Role = apps.get_model('kolibriauth', 'Role')\n from kolibri.auth.models import FacilityUser as RealFacilityUser, Facility as RealFacility, Role as RealRole\n real_default_facility = RealFacility.get_default_facility()\n # Can't do much if no facilities exist, as no facility to FK the users onto\n if default_facility:\n for device_owner in DeviceOwner.objects.all():\n dataset_id = real_default_facility.dataset_id\n real_superuser = RealFacilityUser(\n username=device_owner.username,\n facility=real_default_facility,\n dataset_id=dataset_id\n )\n uuid = real_superuser.calculate_uuid()\n superuser = FacilityUser.objects.create(\n username=device_owner.username,\n password=device_owner.password,\n facility=default_facility,\n full_name=device_owner.full_name,\n date_joined=device_owner.date_joined,\n id=uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_superuser._morango_source_id,\n _morango_partition=real_superuser._morango_partition,\n )\n real_role = RealRole(\n user=real_superuser,\n collection=real_default_facility,\n kind=ADMIN,\n dataset_id=dataset_id,\n )\n role_uuid = real_role.calculate_uuid()\n role = Role.objects.create(\n user=superuser,\n collection=default_facility,\n kind=ADMIN,\n id=role_uuid,\n dataset_id=dataset_id,\n _morango_source_id=real_role._morango_source_id,\n _morango_partition=real_role._morango_partition,\n )\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n # Finally, set the is_provisioned flag\n settings, created = DeviceSettings.objects.get_or_create(is_provisioned=True)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kolibriauth', '0003_auto_20170621_0958'),\n ('device', '0001_initial')\n ]\n\n operations = [\n migrations.RunPython(device_owner_to_super_user, migrations.RunPython.noop),\n migrations.DeleteModel(\n name='DeviceOwner',\n ),\n ]\n", "path": "kolibri/auth/migrations/0004_auto_20170816_1607.py"}]}
1,487
472
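A minimal, runnable sketch of the pattern behind the kolibri fix above, with plain dicts standing in for Django models (all names here are illustrative, not kolibri's API): the broken migration guarded on `Facility.objects.all().first()` while keying the new superuser rows off the app-level default facility, so the fix resolves both from the same primary key.

```python
# Toy model of the migration bug: resolve the working row by the
# authoritative primary key, never by iteration order (.first()).
def resolve_default(facilities, authoritative_default_id):
    by_id = {row["id"]: row for row in facilities}
    return by_id.get(authoritative_default_id)

facilities = [{"id": 2, "name": "Branch"}, {"id": 1, "name": "Default"}]

# Fragile: mimics Facility.objects.all().first() -- depends on row ordering.
assert facilities[0]["name"] == "Branch"

# Robust: mimics Facility.objects.get(pk=real_default_facility.id).
assert resolve_default(facilities, 1)["name"] == "Default"
```

Re-fetching the historical model row by the same pk guarantees the guard and the data writes agree on which facility is the default.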
gh_patches_debug_18591
rasdani/github-patches
git_diff
StackStorm__st2-4007
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pinned eventlet version has outstanding bugs pinned version 0.17 of eventlet has outstanding bugs on it's monkey patching of the ssl module. e.g. https://github.com/eventlet/eventlet/issues/371 ``` # Note: 0.20.0 removed select.poll() on which some of our code and libraries we # depend on rely ``` @Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5 What was the specific issue? </issue> <code> [start of st2common/st2common/util/monkey_patch.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # 15 16 """ 17 Module for performing eventlet and other monkey patching. 18 """ 19 20 from __future__ import absolute_import 21 22 import sys 23 24 __all__ = [ 25 'monkey_patch', 26 'is_use_debugger_flag_provided' 27 ] 28 29 USE_DEBUGGER_FLAG = '--use-debugger' 30 PARENT_ARGS_FLAG = '--parent-args=' 31 32 33 def monkey_patch(): 34 """ 35 Function which performs eventlet monkey patching and also takes into account "--use-debugger" 36 argument in the command line arguments. 37 38 If this argument is found, no monkey patching is performed for the thread module. This allows 39 user to use remote debuggers. 40 """ 41 import eventlet 42 43 patch_thread = not is_use_debugger_flag_provided() 44 eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True) 45 46 47 def is_use_debugger_flag_provided(): 48 # 1. Check sys.argv directly 49 if USE_DEBUGGER_FLAG in sys.argv: 50 return True 51 52 # 2. Check "parent-args" arguments. This is used for spawned processes such as sensors and 53 # Python runner actions 54 55 for arg in sys.argv: 56 if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg: 57 return True 58 59 return False 60 [end of st2common/st2common/util/monkey_patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py --- a/st2common/st2common/util/monkey_patch.py +++ b/st2common/st2common/util/monkey_patch.py @@ -23,6 +23,7 @@ __all__ = [ 'monkey_patch', + 'use_select_poll_workaround', 'is_use_debugger_flag_provided' ] @@ -44,6 +45,21 @@ eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True) +def use_select_poll_workaround(): + """ + Work around for some tests which injects original select module with select.poll() + available to sys.modules. + """ + import sys + import subprocess + import eventlet + + # Work around to get tests to pass with eventlet >= 0.20.0 + if 'nose' in sys.modules.keys(): + sys.modules['select'] = eventlet.patcher.original('select') + subprocess.select = eventlet.patcher.original('select') + + def is_use_debugger_flag_provided(): # 1. Check sys.argv directly if USE_DEBUGGER_FLAG in sys.argv:
{"golden_diff": "diff --git a/st2common/st2common/util/monkey_patch.py b/st2common/st2common/util/monkey_patch.py\n--- a/st2common/st2common/util/monkey_patch.py\n+++ b/st2common/st2common/util/monkey_patch.py\n@@ -23,6 +23,7 @@\n \n __all__ = [\n 'monkey_patch',\n+ 'use_select_poll_workaround',\n 'is_use_debugger_flag_provided'\n ]\n \n@@ -44,6 +45,21 @@\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n \n \n+def use_select_poll_workaround():\n+ \"\"\"\n+ Work around for some tests which injects original select module with select.poll()\n+ available to sys.modules.\n+ \"\"\"\n+ import sys\n+ import subprocess\n+ import eventlet\n+\n+ # Work around to get tests to pass with eventlet >= 0.20.0\n+ if 'nose' in sys.modules.keys():\n+ sys.modules['select'] = eventlet.patcher.original('select')\n+ subprocess.select = eventlet.patcher.original('select')\n+\n+\n def is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n", "issue": "Pinned eventlet version has outstanding bugs\npinned version 0.17 of eventlet has outstanding bugs on it's monkey patching of the ssl module.\r\n\r\ne.g.\r\nhttps://github.com/eventlet/eventlet/issues/371\r\n\r\n```\r\n# Note: 0.20.0 removed select.poll() on which some of our code and libraries we\r\n# depend on rely\r\n```\r\n\r\n@Kami committed this reversion in https://github.com/StackStorm/st2/commit/1ec43d294e6770e56ec8f9990c805cb9dffe98c5\r\n\r\nWhat was the specific issue?\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n#\n\n\"\"\"\nModule for performing eventlet and other monkey patching.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport sys\n\n__all__ = [\n 'monkey_patch',\n 'is_use_debugger_flag_provided'\n]\n\nUSE_DEBUGGER_FLAG = '--use-debugger'\nPARENT_ARGS_FLAG = '--parent-args='\n\n\ndef monkey_patch():\n \"\"\"\n Function which performs eventlet monkey patching and also takes into account \"--use-debugger\"\n argument in the command line arguments.\n\n If this argument is found, no monkey patching is performed for the thread module. This allows\n user to use remote debuggers.\n \"\"\"\n import eventlet\n\n patch_thread = not is_use_debugger_flag_provided()\n eventlet.monkey_patch(os=True, select=True, socket=True, thread=patch_thread, time=True)\n\n\ndef is_use_debugger_flag_provided():\n # 1. Check sys.argv directly\n if USE_DEBUGGER_FLAG in sys.argv:\n return True\n\n # 2. Check \"parent-args\" arguments. This is used for spawned processes such as sensors and\n # Python runner actions\n\n for arg in sys.argv:\n if arg.startswith(PARENT_ARGS_FLAG) and USE_DEBUGGER_FLAG in arg:\n return True\n\n return False\n", "path": "st2common/st2common/util/monkey_patch.py"}]}
1,249
287
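The st2 diff above adds `use_select_poll_workaround`, restoring `select.poll()` for tests after eventlet monkey patching removes it. A trimmed sketch of the same mechanism; it assumes eventlet is installed, and `eventlet.patcher.original` is eventlet's real API for fetching a pre-patch module:

```python
import sys


def use_select_poll_workaround():
    """Re-expose the unpatched select module (which still has poll())."""
    import subprocess
    import eventlet

    if 'nose' in sys.modules:  # only needed under the nose test runner
        original_select = eventlet.patcher.original('select')
        sys.modules['select'] = original_select
        subprocess.select = original_select
```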
gh_patches_debug_9640
rasdani/github-patches
git_diff
chainer__chainer-7760
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor utility link in optimizer_hooks unit tests `chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition. </issue> <code> [start of chainer/optimizer_hooks/gradient_hard_clipping.py] 1 import chainer 2 from chainer import backend 3 4 5 class GradientHardClipping(object): 6 7 """Optimizer/UpdateRule hook function for gradient clipping. 8 9 This hook function clips all gradient arrays to be within a lower and upper 10 bound. 11 12 Args: 13 lower_bound (float): The lower bound of the gradient value. 14 upper_bound (float): The upper bound of the gradient value. 15 16 Attributes: 17 ~optimizer_hooks.GradientHardClipping.lower_bound (float): The 18 lower bound of the gradient value. 19 ~optimizer_hooks.GradientHardClipping.upper_bound (float): The 20 upper bound of the gradient value. 21 ~optimizer_hooks.GradientHardClipping.timing (string): Specifies 22 when this hook should be called by the 23 Optimizer/UpdateRule. Valid values are 'pre' 24 (before any updates) and 'post' 25 (after any updates). 26 ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \ 27 Specifies if this hook is called for each parameter 28 (``True``) or only once (``False``) by an optimizer to 29 which this hook is registered. This function does 30 not expect users to switch the value from default one, 31 which is `True`. 32 33 .. versionadded:: 4.0.0 34 The *timing* parameter. 35 36 """ 37 name = 'GradientHardClipping' 38 call_for_each_param = True 39 timing = 'pre' 40 41 def __init__(self, lower_bound, upper_bound): 42 self.lower_bound = lower_bound 43 self.upper_bound = upper_bound 44 45 def __call__(self, rule, param): 46 grad = param.grad 47 if grad is None: 48 return 49 with chainer.using_device(param.device): 50 xp = param.device.xp 51 # TODO(kshitij12345): Fix when chainerx.clip 52 # supports kwarg `out`. 53 if xp == backend.chainerx \ 54 or isinstance(param.grad, backend.intel64.mdarray): 55 grad[:] = grad.clip(self.lower_bound, self.upper_bound) 56 else: 57 # Save on new object allocation when using numpy and cupy 58 # using kwarg `out` 59 xp.clip(grad, self.lower_bound, self.upper_bound, out=grad) 60 [end of chainer/optimizer_hooks/gradient_hard_clipping.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py --- a/chainer/optimizer_hooks/gradient_hard_clipping.py +++ b/chainer/optimizer_hooks/gradient_hard_clipping.py @@ -52,7 +52,7 @@ # supports kwarg `out`. if xp == backend.chainerx \ or isinstance(param.grad, backend.intel64.mdarray): - grad[:] = grad.clip(self.lower_bound, self.upper_bound) + grad[...] = grad.clip(self.lower_bound, self.upper_bound) else: # Save on new object allocation when using numpy and cupy # using kwarg `out`
{"golden_diff": "diff --git a/chainer/optimizer_hooks/gradient_hard_clipping.py b/chainer/optimizer_hooks/gradient_hard_clipping.py\n--- a/chainer/optimizer_hooks/gradient_hard_clipping.py\n+++ b/chainer/optimizer_hooks/gradient_hard_clipping.py\n@@ -52,7 +52,7 @@\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n- grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n+ grad[...] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n", "issue": "Refactor utility link in optimizer_hooks unit tests\n`chainer_tests/optimizer_hooks_tests` uses similar dummy links, which can be refactored to reduce repetition.\n", "before_files": [{"content": "import chainer\nfrom chainer import backend\n\n\nclass GradientHardClipping(object):\n\n \"\"\"Optimizer/UpdateRule hook function for gradient clipping.\n\n This hook function clips all gradient arrays to be within a lower and upper\n bound.\n\n Args:\n lower_bound (float): The lower bound of the gradient value.\n upper_bound (float): The upper bound of the gradient value.\n\n Attributes:\n ~optimizer_hooks.GradientHardClipping.lower_bound (float): The\n lower bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.upper_bound (float): The\n upper bound of the gradient value.\n ~optimizer_hooks.GradientHardClipping.timing (string): Specifies\n when this hook should be called by the\n Optimizer/UpdateRule. Valid values are 'pre'\n (before any updates) and 'post'\n (after any updates).\n ~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \\\n Specifies if this hook is called for each parameter\n (``True``) or only once (``False``) by an optimizer to\n which this hook is registered. This function does\n not expect users to switch the value from default one,\n which is `True`.\n\n .. versionadded:: 4.0.0\n The *timing* parameter.\n\n \"\"\"\n name = 'GradientHardClipping'\n call_for_each_param = True\n timing = 'pre'\n\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, rule, param):\n grad = param.grad\n if grad is None:\n return\n with chainer.using_device(param.device):\n xp = param.device.xp\n # TODO(kshitij12345): Fix when chainerx.clip\n # supports kwarg `out`.\n if xp == backend.chainerx \\\n or isinstance(param.grad, backend.intel64.mdarray):\n grad[:] = grad.clip(self.lower_bound, self.upper_bound)\n else:\n # Save on new object allocation when using numpy and cupy\n # using kwarg `out`\n xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)\n", "path": "chainer/optimizer_hooks/gradient_hard_clipping.py"}]}
1,182
156
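One concrete behavior difference behind the chainer diff's swap of `grad[:]` for `grad[...]`: basic slicing is rejected on zero-dimensional arrays, while the `Ellipsis` index assigns in place for any rank. A runnable NumPy illustration (NumPy stands in here for the array types the hook actually clips):

```python
import numpy as np

grad = np.array(3.0)  # 0-d array, e.g. the gradient of a scalar parameter

try:
    grad[:] = grad.clip(-1.0, 1.0)   # basic slice: invalid for 0-d arrays
except IndexError as exc:
    print('grad[:] failed:', exc)

grad[...] = grad.clip(-1.0, 1.0)     # Ellipsis: in-place for every rank
print(float(grad))                    # 1.0
```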
gh_patches_debug_22503
rasdani/github-patches
git_diff
TheAlgorithms__Python-10012
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve our test coverage ### Feature description Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase. ### How to find low-coverage files Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage: ``` ---------- coverage: platform linux, python 3.12.0-final-0 ----------- Name Stmts Miss Cover Missing ----------------------------------------------------------------------------------------------------------- quantum/q_fourier_transform.py 30 30 0% 14-93 scripts/validate_solutions.py 54 54 0% 2-94 strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129 ... ``` The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests. Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage. _**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue. ### How to add doctests A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring: ```py def add(a: int, b: int) -> int: """ Adds two non-negative numbers. >>> add(1, 1) 2 >>> add(2, 5) 7 >>> add(1, 0) 1 >>> add(-1, -1) Traceback (most recent last): ... ValueError: Numbers must be non-negative """ ``` For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc). Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it. 
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_ </issue> <code> [start of dynamic_programming/minimum_partition.py] 1 """ 2 Partition a set into two subsets such that the difference of subset sums is minimum 3 """ 4 5 6 def find_min(arr): 7 n = len(arr) 8 s = sum(arr) 9 10 dp = [[False for x in range(s + 1)] for y in range(n + 1)] 11 12 for i in range(1, n + 1): 13 dp[i][0] = True 14 15 for i in range(1, s + 1): 16 dp[0][i] = False 17 18 for i in range(1, n + 1): 19 for j in range(1, s + 1): 20 dp[i][j] = dp[i][j - 1] 21 22 if arr[i - 1] <= j: 23 dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] 24 25 for j in range(int(s / 2), -1, -1): 26 if dp[n][j] is True: 27 diff = s - 2 * j 28 break 29 30 return diff 31 [end of dynamic_programming/minimum_partition.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,13 +3,25 @@ """ -def find_min(arr): +def find_min(arr: list[int]) -> int: + """ + >>> find_min([1, 2, 3, 4, 5]) + 1 + >>> find_min([5, 5, 5, 5, 5]) + 5 + >>> find_min([5, 5, 5, 5]) + 0 + >>> find_min([3]) + 3 + >>> find_min([]) + 0 + """ n = len(arr) s = sum(arr) dp = [[False for x in range(s + 1)] for y in range(n + 1)] - for i in range(1, n + 1): + for i in range(n + 1): dp[i][0] = True for i in range(1, s + 1): @@ -17,7 +29,7 @@ for i in range(1, n + 1): for j in range(1, s + 1): - dp[i][j] = dp[i][j - 1] + dp[i][j] = dp[i - 1][j] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] @@ -28,3 +40,9 @@ break return diff + + +if __name__ == "__main__": + from doctest import testmod + + testmod()
{"golden_diff": "diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py\n--- a/dynamic_programming/minimum_partition.py\n+++ b/dynamic_programming/minimum_partition.py\n@@ -3,13 +3,25 @@\n \"\"\"\n \n \n-def find_min(arr):\n+def find_min(arr: list[int]) -> int:\n+ \"\"\"\n+ >>> find_min([1, 2, 3, 4, 5])\n+ 1\n+ >>> find_min([5, 5, 5, 5, 5])\n+ 5\n+ >>> find_min([5, 5, 5, 5])\n+ 0\n+ >>> find_min([3])\n+ 3\n+ >>> find_min([])\n+ 0\n+ \"\"\"\n n = len(arr)\n s = sum(arr)\n \n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n \n- for i in range(1, n + 1):\n+ for i in range(n + 1):\n dp[i][0] = True\n \n for i in range(1, s + 1):\n@@ -17,7 +29,7 @@\n \n for i in range(1, n + 1):\n for j in range(1, s + 1):\n- dp[i][j] = dp[i][j - 1]\n+ dp[i][j] = dp[i - 1][j]\n \n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n@@ -28,3 +40,9 @@\n break\n \n return diff\n+\n+\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n+\n+ testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. 
Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\nPartition a set into two subsets such that the difference of subset sums is minimum\n\"\"\"\n\n\ndef find_min(arr):\n n = len(arr)\n s = sum(arr)\n\n dp = [[False for x in range(s + 1)] for y in range(n + 1)]\n\n for i in range(1, n + 1):\n dp[i][0] = True\n\n for i in range(1, s + 1):\n dp[0][i] = False\n\n for i in range(1, n + 1):\n for j in range(1, s + 1):\n dp[i][j] = dp[i][j - 1]\n\n if arr[i - 1] <= j:\n dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]\n\n for j in range(int(s / 2), -1, -1):\n if dp[n][j] is True:\n diff = s - 2 * j\n break\n\n return diff\n", "path": "dynamic_programming/minimum_partition.py"}]}
1,669
417
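The TheAlgorithms record above is all about the doctest pattern, so here is a self-contained refresher that runs as-is. One caution on the example quoted in the issue: `doctest` only recognizes the exact header `Traceback (most recent call last):` (or the legacy `Traceback (innermost last):`), so the `most recent last` spelling shown there would not match a raised exception.

```python
def add(a: int, b: int) -> int:
    """Add two integers.

    >>> add(2, 3)
    5
    >>> add(-1, 1)
    0
    """
    return a + b


if __name__ == "__main__":
    import doctest

    print(doctest.testmod())  # TestResults(failed=0, attempted=2)
```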
gh_patches_debug_17712
rasdani/github-patches
git_diff
pypa__virtualenv-1509
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Integration with virtualenvwrapper Congratulations on the rewrite. I notice that `virtualenvwrapper` doesn't integrate with the new beta: ``` $ mkvirtualenv foo ERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script. ``` Sure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc. It would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated? </issue> <code> [start of src/virtualenv/activation/bash/__init__.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from virtualenv.util.path import Path 4 5 from ..via_template import ViaTemplateActivator 6 7 8 class BashActivator(ViaTemplateActivator): 9 @classmethod 10 def supports(cls, interpreter): 11 return interpreter.os != "nt" 12 13 def templates(self): 14 yield Path("activate.sh") 15 [end of src/virtualenv/activation/bash/__init__.py] [start of src/virtualenv/activation/via_template.py] 1 from __future__ import absolute_import, unicode_literals 2 3 import os 4 import sys 5 from abc import ABCMeta, abstractmethod 6 7 import six 8 9 from .activator import Activator 10 11 if sys.version_info >= (3, 7): 12 from importlib.resources import read_text 13 else: 14 from importlib_resources import read_text 15 16 17 @six.add_metaclass(ABCMeta) 18 class ViaTemplateActivator(Activator): 19 @abstractmethod 20 def templates(self): 21 raise NotImplementedError 22 23 def generate(self, creator): 24 dest_folder = creator.bin_dir 25 replacements = self.replacements(creator, dest_folder) 26 self._generate(replacements, self.templates(), dest_folder, creator) 27 if self.flag_prompt is not None: 28 creator.pyenv_cfg["prompt"] = self.flag_prompt 29 30 def replacements(self, creator, dest_folder): 31 return { 32 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt, 33 "__VIRTUAL_ENV__": six.ensure_text(str(creator.dest)), 34 "__VIRTUAL_NAME__": creator.env_name, 35 "__BIN_NAME__": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))), 36 "__PATH_SEP__": six.ensure_text(os.pathsep), 37 } 38 39 def _generate(self, replacements, templates, to_folder, creator): 40 for template in templates: 41 text = self.instantiate_template(replacements, template, creator) 42 (to_folder / template).write_text(text, encoding="utf-8") 43 44 def instantiate_template(self, replacements, template, creator): 45 # read text and do replacements 46 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict") 47 for key, value in replacements.items(): 48 value = self._repr_unicode(creator, value) 49 text = text.replace(key, value) 50 return text 51 52 @staticmethod 53 def _repr_unicode(creator, value): 54 # by default we just let it be unicode 55 return value 56 [end of src/virtualenv/activation/via_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py --- a/src/virtualenv/activation/bash/__init__.py +++ b/src/virtualenv/activation/bash/__init__.py @@ -12,3 +12,6 @@ def templates(self): yield Path("activate.sh") + + def as_name(self, template): + return template.stem diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py --- a/src/virtualenv/activation/via_template.py +++ b/src/virtualenv/activation/via_template.py @@ -39,7 +39,11 @@ def _generate(self, replacements, templates, to_folder, creator): for template in templates: text = self.instantiate_template(replacements, template, creator) - (to_folder / template).write_text(text, encoding="utf-8") + dest = to_folder / self.as_name(template) + dest.write_text(text, encoding="utf-8") + + def as_name(self, template): + return template.name def instantiate_template(self, replacements, template, creator): # read text and do replacements
{"golden_diff": "diff --git a/src/virtualenv/activation/bash/__init__.py b/src/virtualenv/activation/bash/__init__.py\n--- a/src/virtualenv/activation/bash/__init__.py\n+++ b/src/virtualenv/activation/bash/__init__.py\n@@ -12,3 +12,6 @@\n \n def templates(self):\n yield Path(\"activate.sh\")\n+\n+ def as_name(self, template):\n+ return template.stem\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -39,7 +39,11 @@\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n- (to_folder / template).write_text(text, encoding=\"utf-8\")\n+ dest = to_folder / self.as_name(template)\n+ dest.write_text(text, encoding=\"utf-8\")\n+\n+ def as_name(self, template):\n+ return template.name\n \n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n", "issue": "Integration with virtualenvwrapper\nCongratulations on the rewrite.\r\n\r\nI notice that `virtualenvwrapper` doesn't integrate with the new beta:\r\n\r\n```\r\n$ mkvirtualenv foo\r\nERROR: Environment '/Users/brettmz/.virtualenvs/foo' does not contain an activate script.\r\n```\r\n\r\nSure enough - I believe it is looking for a script `activate` - but the script file now depends on the shell - `activate.sh` etc. \r\n\r\nIt would be good if this could work somehow - would it be reasonable to create / link a default `activate` script for this case? Or should virtualenvwrapper be updated?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom virtualenv.util.path import Path\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass BashActivator(ViaTemplateActivator):\n @classmethod\n def supports(cls, interpreter):\n return interpreter.os != \"nt\"\n\n def templates(self):\n yield Path(\"activate.sh\")\n", "path": "src/virtualenv/activation/bash/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nimport six\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\[email protected]_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": six.ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": six.ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": six.ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n (to_folder / template).write_text(text, encoding=\"utf-8\")\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = 
read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]}
1,325
277
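The virtualenv fix above threads every template through an `as_name` hook so the bash activator can drop the `.sh` suffix and write the bare `activate` file virtualenvwrapper expects. A standard-library-only sketch of that hook, with simplified class names:

```python
from pathlib import Path


class TemplateActivator:
    def as_name(self, template: Path) -> str:
        return template.name   # default: keep the template file name


class BashActivator(TemplateActivator):
    def as_name(self, template: Path) -> str:
        return template.stem   # "activate.sh" -> "activate"


print(TemplateActivator().as_name(Path("activate.ps1")))  # activate.ps1
print(BashActivator().as_name(Path("activate.sh")))       # activate
```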
gh_patches_debug_25048
rasdani/github-patches
git_diff
mosaicml__composer-1493
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> HuggingFace model should update word embeddings dimension according to tokenizer **To reproduce** Steps to reproduce the behavior: Right now if I call ``` model = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m') tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2') model = HuggingFaceModel(model=model, tokenizer=tokenizer) ``` The bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used. ## Expected behavior The class HuggingFaceModel needs to update the word embedding matrix if a tokenizer is supplied. </issue> <code> [start of composer/models/huggingface.py] 1 # Copyright 2022 MosaicML Composer authors 2 # SPDX-License-Identifier: Apache-2.0 3 4 """A wrapper class that converts 🤗 Transformers models to composer models""" 5 6 from __future__ import annotations 7 8 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union 9 10 from torchmetrics import Metric 11 12 from composer.models.base import ComposerModel 13 from composer.utils.import_helpers import MissingConditionalImportError 14 15 if TYPE_CHECKING: 16 import transformers 17 18 __all__ = ['HuggingFaceModel'] 19 20 21 class HuggingFaceModel(ComposerModel): 22 """ 23 A wrapper class that converts 🤗 Transformers models to composer models. 24 25 Args: 26 model (transformers.PreTrainedModel): A 🤗 Transformers model. 27 tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``. 28 use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False`` 29 metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``. 30 .. warning:: This wrapper is designed to work with 🤗 datasets that define a `labels` column. 31 32 Example: 33 34 .. testcode:: 35 36 import transformers 37 from composer.models import HuggingFaceModel 38 39 hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2) 40 model = HuggingFaceModel(hf_model) 41 """ 42 43 def __init__(self, 44 model: transformers.PreTrainedModel, 45 tokenizer: Optional[Union[transformers.PreTrainedTokenizer, 46 transformers.PreTrainedTokenizerFast]] = None, 47 use_logits: Optional[bool] = False, 48 metrics: Optional[List[Metric]] = None) -> None: 49 try: 50 import transformers 51 except ImportError as e: 52 raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e 53 54 super().__init__() 55 self.model = model 56 self.config = model.config 57 58 # the set of inputs that a model expects inferred from the model type or 59 # tokenizer if provided 60 if tokenizer is None: 61 if isinstance(self.model.base_model, transformers.GPT2Model): 62 self.model_inputs = {'input_ids', 'attention_mask'} 63 elif isinstance(self.model.base_model, transformers.BertModel): 64 self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'} 65 else: 66 assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name' 67 self.model_inputs = set(tokenizer.model_input_names) 68 69 self.use_logits = use_logits 70 71 self.train_metrics = None 72 self.val_metrics = None 73 74 if metrics: 75 self.train_metrics = {metric.__class__.__name__: metric for metric in metrics} 76 self.val_metrics = {metric.__class__.__name__: metric for metric in metrics} 77 78 self.labels = None # set in eval_forward() if exists 79 80 def forward(self, batch): 81 for key in self.model_inputs: 82 if key not in batch.keys(): 83 raise ValueError(f'Batch missing key: {key}') 84 85 output = self.model(**batch) # type: ignore (thirdparty) 86 return output 87 88 def loss(self, outputs, batch): 89 return outputs['loss'] 90 91 def eval_forward(self, batch, outputs: Optional[Any] = None): 92 output = outputs if outputs else self.forward(batch) 93 if self.use_logits: 94 self.labels = batch.pop('labels') 95 output = output['logits'] 96 97 # if we are in the single class case, then remove the classes dimension 98 if output.shape[1] == 1: 99 output = output.squeeze(dim=1) 100 101 return output 102 103 def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]: 104 if is_train: 105 metrics = self.train_metrics 106 else: 107 metrics = self.val_metrics 108 109 return metrics if metrics else {} 110 111 def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None: 112 metric.update(outputs, self.labels) 113 114 def get_model_inputs(self): 115 """Returns a set of inputs that the model expects in the forward pass. 116 If an algorithm wants to interact with the model inputs (for instance, 117 popping the labels for a custom loss fn, or adding attention head masks 118 for head pruning, it must access self.set_model_inputs(). 119 Returns: 120 model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass. 121 """ 122 123 return self.model_inputs 124 [end of composer/models/huggingface.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py --- a/composer/models/huggingface.py +++ b/composer/models/huggingface.py @@ -5,6 +5,7 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from torchmetrics import Metric @@ -15,6 +16,8 @@ if TYPE_CHECKING: import transformers +log = logging.getLogger(__name__) + __all__ = ['HuggingFaceModel'] @@ -66,6 +69,13 @@ assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name' self.model_inputs = set(tokenizer.model_input_names) + if self.config.vocab_size != len(tokenizer): + # set model's word embedding matrix and final lm_head to vocab size according to tokenizer + log.warning( + f'The number of tokens in the tokenizer and the number of tokens in the model are different.' + f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.') + self.model.resize_token_embeddings(len(tokenizer)) + self.use_logits = use_logits self.train_metrics = None
{"golden_diff": "diff --git a/composer/models/huggingface.py b/composer/models/huggingface.py\n--- a/composer/models/huggingface.py\n+++ b/composer/models/huggingface.py\n@@ -5,6 +5,7 @@\n \n from __future__ import annotations\n \n+import logging\n from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n \n from torchmetrics import Metric\n@@ -15,6 +16,8 @@\n if TYPE_CHECKING:\n import transformers\n \n+log = logging.getLogger(__name__)\n+\n __all__ = ['HuggingFaceModel']\n \n \n@@ -66,6 +69,13 @@\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n \n+ if self.config.vocab_size != len(tokenizer):\n+ # set model's word embedding matrix and final lm_head to vocab size according to tokenizer\n+ log.warning(\n+ f'The number of tokens in the tokenizer and the number of tokens in the model are different.'\n+ f' Resizing the model tokenizer to {len(tokenizer)} from {self.config.vocab_size}.')\n+ self.model.resize_token_embeddings(len(tokenizer))\n+\n self.use_logits = use_logits\n \n self.train_metrics = None\n", "issue": "HuggingFace model should update word embeddings dimension according to tokenizer\n** To reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRight now if I call \r\n\r\n```\r\nmodel = transformers.AutoModelForCausalLM.from_pretrained('bigscience/bloom-560m')\r\ntokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')\r\n\r\nmodel = HuggingFaceModel(model=model, tokenizer=tokenizer)\r\n```\r\n\r\nThe bloom model will have a [250880, 1024] dim word embedding matrix and a vocab size of 250880 even though a gpt2 tokenizer with vocab size of 50257 is used.\r\n\r\n## Expected behavior\r\n\r\nThe class HuggingFaceModel needs to update word embedding matrix if a tokenizer is supplied.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"A wrapper class that converts \ud83e\udd17 Transformers models to composer models\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom torchmetrics import Metric\n\nfrom composer.models.base import ComposerModel\nfrom composer.utils.import_helpers import MissingConditionalImportError\n\nif TYPE_CHECKING:\n import transformers\n\n__all__ = ['HuggingFaceModel']\n\n\nclass HuggingFaceModel(ComposerModel):\n \"\"\"\n A wrapper class that converts \ud83e\udd17 Transformers models to composer models.\n\n Args:\n model (transformers.PreTrainedModel): A \ud83e\udd17 Transformers model.\n tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to prepare the dataset and validate model inputs during training. Default ``None``.\n use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``\n metrics (list[Metric], optional): list of torchmetrics to apply to the output of `validate`. Default: ``None``.\n .. warning:: This wrapper is designed to work with \ud83e\udd17 datasets that define a `labels` column.\n\n Example:\n\n .. 
testcode::\n\n import transformers\n from composer.models import HuggingFaceModel\n\n hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n model = HuggingFaceModel(hf_model)\n \"\"\"\n\n def __init__(self,\n model: transformers.PreTrainedModel,\n tokenizer: Optional[Union[transformers.PreTrainedTokenizer,\n transformers.PreTrainedTokenizerFast]] = None,\n use_logits: Optional[bool] = False,\n metrics: Optional[List[Metric]] = None) -> None:\n try:\n import transformers\n except ImportError as e:\n raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e\n\n super().__init__()\n self.model = model\n self.config = model.config\n\n # the set of inputs that a model expects inferred from the model type or\n # tokenizer if provided\n if tokenizer is None:\n if isinstance(self.model.base_model, transformers.GPT2Model):\n self.model_inputs = {'input_ids', 'attention_mask'}\n elif isinstance(self.model.base_model, transformers.BertModel):\n self.model_inputs = {'input_ids', 'attention_mask', 'token_type_ids'}\n else:\n assert tokenizer.model_input_names is not None, 'the tokenizer should have a model input name'\n self.model_inputs = set(tokenizer.model_input_names)\n\n self.use_logits = use_logits\n\n self.train_metrics = None\n self.val_metrics = None\n\n if metrics:\n self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}\n self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}\n\n self.labels = None # set in eval_forward() if exists\n\n def forward(self, batch):\n for key in self.model_inputs:\n if key not in batch.keys():\n raise ValueError(f'Batch missing key: {key}')\n\n output = self.model(**batch) # type: ignore (thirdparty)\n return output\n\n def loss(self, outputs, batch):\n return outputs['loss']\n\n def eval_forward(self, batch, outputs: Optional[Any] = None):\n output = outputs if outputs else self.forward(batch)\n if self.use_logits:\n self.labels = batch.pop('labels')\n output = output['logits']\n\n # if we are in the single class case, then remove the classes dimension\n if output.shape[1] == 1:\n output = output.squeeze(dim=1)\n\n return output\n\n def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:\n if is_train:\n metrics = self.train_metrics\n else:\n metrics = self.val_metrics\n\n return metrics if metrics else {}\n\n def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:\n metric.update(outputs, self.labels)\n\n def get_model_inputs(self):\n \"\"\"Returns a set of inputs that the model expects in the forward pass.\n If an algorithm wants to interact with the model inputs (for instance,\n popping the labels for a custom loss fn, or adding attention head masks\n for head pruning, it must access self.set_model_inputs().\n Returns:\n model_inputs: The set of keys that are expected in the Mapping used to compute the forward pass.\n \"\"\"\n\n return self.model_inputs\n", "path": "composer/models/huggingface.py"}]}
1,989
282
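`resize_token_embeddings`, used in the composer fix above, is the real Hugging Face `transformers` API for syncing the embedding table to a new vocabulary size. A hedged end-to-end sketch; it assumes `transformers` is installed (plus network access for the first download) and picks a deliberately tiny public checkpoint purely for illustration:

```python
import transformers

model = transformers.AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
tokenizer.add_tokens(["<my-new-token>"])           # grow the vocab by one

if model.config.vocab_size != len(tokenizer):      # the guard from the diff
    model.resize_token_embeddings(len(tokenizer))  # resize table to match

assert model.get_input_embeddings().weight.shape[0] == len(tokenizer)
```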
gh_patches_debug_16540
rasdani/github-patches
git_diff
Kinto__kinto-1343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `kinto create-user` doesn't override the password if the user already exists. </issue> <code> [start of kinto/plugins/accounts/scripts.py] 1 import logging 2 import getpass 3 4 import transaction as current_transaction 5 from pyramid.settings import asbool 6 7 from .utils import hash_password 8 from .views import AccountIdGenerator 9 10 11 logger = logging.getLogger(__name__) 12 13 14 def create_user(env, username=None, password=None): 15 """Administrative command to create a new user.""" 16 registry = env['registry'] 17 settings = registry.settings 18 readonly_mode = asbool(settings.get('readonly', False)) 19 if readonly_mode: 20 message = 'Cannot create a user with a readonly server.' 21 logger.error(message) 22 return 51 23 24 if 'kinto.plugins.accounts' not in settings['includes']: 25 message = 'Cannot create a user when the accounts plugin is not installed.' 26 logger.error(message) 27 return 52 28 29 try: 30 validator = AccountIdGenerator() 31 if username is None: 32 username = input('Username: ') 33 while not validator.match(username): 34 print('{} is not a valid username.') 35 print('Username should match {0!r}, please try again.'.format(validator.regexp)) 36 username = input('Username: ') 37 38 if password is None: 39 while True: # The user didn't entered twice the same password 40 password = getpass.getpass('Please enter a password for {}: '.format(username)) 41 confirm = getpass.getpass('Please confirm the password: '.format(username)) 42 43 if password != confirm: 44 print('Sorry, passwords do not match, please try again.') 45 else: 46 break 47 except EOFError: 48 print('User creation aborted') 49 return 53 50 51 print("Creating user '{}'".format(username)) 52 record = {'id': username, 'password': hash_password(password)} 53 registry.storage.create(collection_id='account', 54 parent_id=username, 55 record=record, 56 ignore_conflict=True) 57 registry.permission.add_principal_to_ace('/accounts/{}'.format(username), 58 'write', 59 'account:{}'.format(username)) 60 61 current_transaction.commit() 62 63 return 0 64 [end of kinto/plugins/accounts/scripts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py --- a/kinto/plugins/accounts/scripts.py +++ b/kinto/plugins/accounts/scripts.py @@ -50,10 +50,10 @@ print("Creating user '{}'".format(username)) record = {'id': username, 'password': hash_password(password)} - registry.storage.create(collection_id='account', + registry.storage.update(collection_id='account', parent_id=username, - record=record, - ignore_conflict=True) + object_id=username, + record=record) registry.permission.add_principal_to_ace('/accounts/{}'.format(username), 'write', 'account:{}'.format(username))
{"golden_diff": "diff --git a/kinto/plugins/accounts/scripts.py b/kinto/plugins/accounts/scripts.py\n--- a/kinto/plugins/accounts/scripts.py\n+++ b/kinto/plugins/accounts/scripts.py\n@@ -50,10 +50,10 @@\n \n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n- registry.storage.create(collection_id='account',\n+ registry.storage.update(collection_id='account',\n parent_id=username,\n- record=record,\n- ignore_conflict=True)\n+ object_id=username,\n+ record=record)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n", "issue": "`kinto create-user` doesn't override the password if the user already exists.\n\n", "before_files": [{"content": "import logging\nimport getpass\n\nimport transaction as current_transaction\nfrom pyramid.settings import asbool\n\nfrom .utils import hash_password\nfrom .views import AccountIdGenerator\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_user(env, username=None, password=None):\n \"\"\"Administrative command to create a new user.\"\"\"\n registry = env['registry']\n settings = registry.settings\n readonly_mode = asbool(settings.get('readonly', False))\n if readonly_mode:\n message = 'Cannot create a user with a readonly server.'\n logger.error(message)\n return 51\n\n if 'kinto.plugins.accounts' not in settings['includes']:\n message = 'Cannot create a user when the accounts plugin is not installed.'\n logger.error(message)\n return 52\n\n try:\n validator = AccountIdGenerator()\n if username is None:\n username = input('Username: ')\n while not validator.match(username):\n print('{} is not a valid username.')\n print('Username should match {0!r}, please try again.'.format(validator.regexp))\n username = input('Username: ')\n\n if password is None:\n while True: # The user didn't entered twice the same password\n password = getpass.getpass('Please enter a password for {}: '.format(username))\n confirm = getpass.getpass('Please confirm the password: '.format(username))\n\n if password != confirm:\n print('Sorry, passwords do not match, please try again.')\n else:\n break\n except EOFError:\n print('User creation aborted')\n return 53\n\n print(\"Creating user '{}'\".format(username))\n record = {'id': username, 'password': hash_password(password)}\n registry.storage.create(collection_id='account',\n parent_id=username,\n record=record,\n ignore_conflict=True)\n registry.permission.add_principal_to_ace('/accounts/{}'.format(username),\n 'write',\n 'account:{}'.format(username))\n\n current_transaction.commit()\n\n return 0\n", "path": "kinto/plugins/accounts/scripts.py"}]}
1,104
154
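The golden diff above is an upsert fix: `storage.create(..., ignore_conflict=True)` silently kept an existing record, so the password was never overwritten, while `storage.update()` with an explicit `object_id` replaces it. A sketch of the fixed call, wrapped as a function for clarity (`upsert_account` is a hypothetical name; `registry` and `hash_password` come from the record's `scripts.py`):

```python
from kinto.plugins.accounts.utils import hash_password  # as imported in scripts.py

def upsert_account(registry, username, password):
    """Create the account, or overwrite it if it already exists."""
    record = {'id': username, 'password': hash_password(password)}
    # update() addresses the record directly by object_id and overwrites it;
    # create(..., ignore_conflict=True) left an existing record untouched.
    registry.storage.update(collection_id='account',
                            parent_id=username,
                            object_id=username,
                            record=record)
```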
gh_patches_debug_22380
rasdani/github-patches
git_diff
getsentry__sentry-python-355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 0.7.10 CeleryIntegration captures task Ignore exception Similar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored. The exception in Sentry is showing `celery/app/task.py in replace at line 894`: ```python raise Ignore('Replaced by new task') ``` celery: 4.3 sentry-sdk: 0.7.10 python: 3.6 </issue> <code> [start of sentry_sdk/integrations/celery.py] 1 from __future__ import absolute_import 2 3 import sys 4 5 from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore 6 7 from sentry_sdk.hub import Hub 8 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception 9 from sentry_sdk._compat import reraise 10 from sentry_sdk.integrations import Integration 11 from sentry_sdk.integrations.logging import ignore_logger 12 13 14 class CeleryIntegration(Integration): 15 identifier = "celery" 16 17 @staticmethod 18 def setup_once(): 19 import celery.app.trace as trace # type: ignore 20 21 old_build_tracer = trace.build_tracer 22 23 def sentry_build_tracer(name, task, *args, **kwargs): 24 # Need to patch both methods because older celery sometimes 25 # short-circuits to task.run if it thinks it's safe. 26 task.__call__ = _wrap_task_call(task, task.__call__) 27 task.run = _wrap_task_call(task, task.run) 28 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs)) 29 30 trace.build_tracer = sentry_build_tracer 31 32 _patch_worker_exit() 33 34 # This logger logs every status of every task that ran on the worker. 35 # Meaning that every task's breadcrumbs are full of stuff like "Task 36 # <foo> raised unexpected <bar>". 37 ignore_logger("celery.worker.job") 38 39 40 def _wrap_tracer(task, f): 41 # Need to wrap tracer for pushing the scope before prerun is sent, and 42 # popping it after postrun is sent. 43 # 44 # This is the reason we don't use signals for hooking in the first place. 45 # Also because in Celery 3, signal dispatch returns early if one handler 46 # crashes. 47 def _inner(*args, **kwargs): 48 hub = Hub.current 49 if hub.get_integration(CeleryIntegration) is None: 50 return f(*args, **kwargs) 51 52 with hub.push_scope() as scope: 53 scope._name = "celery" 54 scope.clear_breadcrumbs() 55 scope.add_event_processor(_make_event_processor(task, *args, **kwargs)) 56 57 return f(*args, **kwargs) 58 59 return _inner 60 61 62 def _wrap_task_call(task, f): 63 # Need to wrap task call because the exception is caught before we get to 64 # see it. Also celery's reported stacktrace is untrustworthy. 
65 def _inner(*args, **kwargs): 66 try: 67 return f(*args, **kwargs) 68 except Exception: 69 exc_info = sys.exc_info() 70 with capture_internal_exceptions(): 71 _capture_exception(task, exc_info) 72 reraise(*exc_info) 73 74 return _inner 75 76 77 def _make_event_processor(task, uuid, args, kwargs, request=None): 78 def event_processor(event, hint): 79 with capture_internal_exceptions(): 80 event["transaction"] = task.name 81 82 with capture_internal_exceptions(): 83 extra = event.setdefault("extra", {}) 84 extra["celery-job"] = { 85 "task_name": task.name, 86 "args": args, 87 "kwargs": kwargs, 88 } 89 90 if "exc_info" in hint: 91 with capture_internal_exceptions(): 92 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded): 93 event["fingerprint"] = [ 94 "celery", 95 "SoftTimeLimitExceeded", 96 getattr(task, "name", task), 97 ] 98 99 return event 100 101 return event_processor 102 103 104 def _capture_exception(task, exc_info): 105 hub = Hub.current 106 107 if hub.get_integration(CeleryIntegration) is None: 108 return 109 if isinstance(exc_info[1], Retry): 110 return 111 if hasattr(task, "throws") and isinstance(exc_info[1], task.throws): 112 return 113 114 event, hint = event_from_exception( 115 exc_info, 116 client_options=hub.client.options, 117 mechanism={"type": "celery", "handled": False}, 118 ) 119 120 hub.capture_event(event, hint=hint) 121 122 123 def _patch_worker_exit(): 124 # Need to flush queue before worker shutdown because a crashing worker will 125 # call os._exit 126 from billiard.pool import Worker # type: ignore 127 128 old_workloop = Worker.workloop 129 130 def sentry_workloop(*args, **kwargs): 131 try: 132 return old_workloop(*args, **kwargs) 133 finally: 134 with capture_internal_exceptions(): 135 hub = Hub.current 136 if hub.get_integration(CeleryIntegration) is not None: 137 hub.flush() 138 139 Worker.workloop = sentry_workloop 140 [end of sentry_sdk/integrations/celery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py --- a/sentry_sdk/integrations/celery.py +++ b/sentry_sdk/integrations/celery.py @@ -2,7 +2,12 @@ import sys -from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore +from celery.exceptions import ( # type: ignore + SoftTimeLimitExceeded, + Retry, + Ignore, + Reject, +) from sentry_sdk.hub import Hub from sentry_sdk.utils import capture_internal_exceptions, event_from_exception @@ -11,6 +16,9 @@ from sentry_sdk.integrations.logging import ignore_logger +CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject) + + class CeleryIntegration(Integration): identifier = "celery" @@ -106,7 +114,7 @@ if hub.get_integration(CeleryIntegration) is None: return - if isinstance(exc_info[1], Retry): + if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS): return if hasattr(task, "throws") and isinstance(exc_info[1], task.throws): return
{"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -2,7 +2,12 @@\n \n import sys\n \n-from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n+from celery.exceptions import ( # type: ignore\n+ SoftTimeLimitExceeded,\n+ Retry,\n+ Ignore,\n+ Reject,\n+)\n \n from sentry_sdk.hub import Hub\n from sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n@@ -11,6 +16,9 @@\n from sentry_sdk.integrations.logging import ignore_logger\n \n \n+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n+\n+\n class CeleryIntegration(Integration):\n identifier = \"celery\"\n \n@@ -106,7 +114,7 @@\n \n if hub.get_integration(CeleryIntegration) is None:\n return\n- if isinstance(exc_info[1], Retry):\n+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n", "issue": "0.7.10 CeleryIntegration captures task Ignore exception\nSimilar to #252, I've noticed that the Sentry client with the CeleryIntegration is capturing task Ignore exceptions, which are often used with dynamic tasks (see also https://github.com/celery/celery/issues/3437). I believe that since Retries are ignored, the `Ignore` exception should also be ignored.\r\n\r\nThe exception in Sentry is showing `celery/app/task.py in replace at line 894`:\r\n```python\r\nraise Ignore('Replaced by new task')\r\n```\r\n\r\ncelery: 4.3\r\nsentry-sdk: 0.7.10\r\npython: 3.6\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, 
f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], Retry):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}]}
2,007
283
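The fix above generalizes the existing `Retry` check: `Retry`, `Ignore`, and `Reject` are all exceptions Celery raises to steer control flow (retry a task, replace or ignore it, reject it), not failures, so none of them should be reported. A condensed sketch of the guard introduced by the diff:

```python
from celery.exceptions import Ignore, Reject, Retry

# All three steer Celery's control flow rather than signal a task failure;
# Ignore is what task.replace() raises ("Replaced by new task" in the issue).
CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)

def should_capture(exc: BaseException) -> bool:
    """Sketch: report an exception to Sentry only if it is a real failure."""
    return not isinstance(exc, CELERY_CONTROL_FLOW_EXCEPTIONS)
```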
gh_patches_debug_9738
rasdani/github-patches
git_diff
modin-project__modin-6337
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ray is incompatible with pydantic>=2.0 We should pin `pydantic<2.0` to workaround the issues on Ray side. Example [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841): ```bash File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py", line 1, in <module> from ray.util.state.api import ( File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py", line 17, in <module> from ray.util.state.common import ( File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py", line 120, in <module> @dataclass(init=True) File "/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py", line 139, in dataclass assert init is False, 'pydantic.dataclasses.dataclass only supports init=False' AssertionError: pydantic.dataclasses.dataclass only supports init=False ``` </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 import versioneer 3 4 with open("README.md", "r", encoding="utf-8") as fh: 5 long_description = fh.read() 6 7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"] 8 # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100 9 ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"] 10 unidist_deps = ["unidist[mpi]>=0.2.1"] 11 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"] 12 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"] 13 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"] 14 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps 15 16 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions. 17 # This file provides the "import pandas before Ray init" feature if specific 18 # environment variable is set (see https://github.com/modin-project/modin/issues/4564). 19 cmdclass = versioneer.get_cmdclass() 20 extra_files = ["modin-autoimport-pandas.pth"] 21 22 23 class AddPthFileBuild(cmdclass["build_py"]): 24 def _get_data_files(self): 25 return (super()._get_data_files() or []) + [ 26 (".", ".", self.build_lib, extra_files) 27 ] 28 29 30 class AddPthFileSDist(cmdclass["sdist"]): 31 def make_distribution(self): 32 self.filelist.extend(extra_files) 33 return super().make_distribution() 34 35 36 cmdclass["build_py"] = AddPthFileBuild 37 cmdclass["sdist"] = AddPthFileSDist 38 39 setup( 40 name="modin", 41 version=versioneer.get_version(), 42 cmdclass=cmdclass, 43 description="Modin: Make your pandas code run faster by changing one line of code.", 44 packages=find_packages(exclude=["scripts", "scripts.*"]), 45 include_package_data=True, 46 license="Apache 2", 47 url="https://github.com/modin-project/modin", 48 long_description=long_description, 49 long_description_content_type="text/markdown", 50 install_requires=[ 51 "pandas>=2,<2.1", 52 "packaging", 53 "numpy>=1.18.5", 54 "fsspec", 55 "psutil", 56 ], 57 extras_require={ 58 # can be installed by pip install modin[dask] 59 "dask": dask_deps, 60 "ray": ray_deps, 61 "unidist": unidist_deps, 62 "remote": remote_deps, 63 "spreadsheet": spreadsheet_deps, 64 "sql": sql_deps, 65 "all": all_deps, 66 }, 67 python_requires=">=3.8", 68 ) 69 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -6,7 +6,8 @@ dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"] # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100 -ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"] +# pydantic<2: https://github.com/modin-project/modin/issues/6336 +ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow", "pydantic<2"] unidist_deps = ["unidist[mpi]>=0.2.1"] remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"] spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,8 @@\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n # ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n-ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n+# pydantic<2: https://github.com/modin-project/modin/issues/6336\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\", \"pydantic<2\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Ray is incompatible with pydantic>=2.0\nWe should pin `pydantic<2.0` to workaround the issues on Ray side.\r\n\r\n\r\nExample [from](https://github.com/modin-project/modin/actions/runs/5425526005/jobs/9866377841):\r\n```bash\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/__init__.py\", line 1, in <module>\r\n from ray.util.state.api import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/api.py\", line 17, in <module>\r\n from ray.util.state.common import (\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/ray/util/state/common.py\", line 120, in <module>\r\n @dataclass(init=True)\r\n File \"/usr/share/miniconda/envs/modin/lib/python3.8/site-packages/pydantic/dataclasses.py\", line 139, in dataclass\r\n assert init is False, 'pydantic.dataclasses.dataclass only supports init=False'\r\nAssertionError: pydantic.dataclasses.dataclass only supports init=False\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n 
\"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]}
1,583
226
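The one-line pin above is the whole workaround: the traceback in the issue shows `ray.util.state.common`'s `@dataclass(init=True)` tripping pydantic's `init is False` assertion under pydantic 2, and constraining Modin's `ray` extra sidesteps the import failure. The fixed extras list for reference:

```python
# pydantic<2: https://github.com/modin-project/modin/issues/6336
ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow", "pydantic<2"]
```

Pinning in the downstream package is a stopgap by design; the tracking-issue comment marks the pin so it can be lifted once Ray supports pydantic 2.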
gh_patches_debug_3681
rasdani/github-patches
git_diff
ivy-llc__ivy-13823
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rand_like </issue> <code> [start of ivy/functional/frontends/torch/random_sampling.py] 1 import ivy 2 from ivy.func_wrapper import with_supported_dtypes 3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 4 5 try: 6 from torch import Generator 7 except ImportError: 8 from types import SimpleNamespace 9 10 Generator = SimpleNamespace 11 12 13 def seed() -> int: 14 """Returns a 64 bit number used to seed the RNG""" 15 return int(ivy.randint(-(2**63), 2**63 - 1)) 16 17 18 @to_ivy_arrays_and_back 19 def manual_seed(seed: int): 20 ivy.seed(seed_value=seed) 21 return Generator().manual_seed(seed) 22 23 24 @with_supported_dtypes( 25 { 26 "1.11.0 and below": ( 27 "float32", 28 "float64", 29 ) 30 }, 31 "torch", 32 ) 33 @to_ivy_arrays_and_back 34 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None): 35 return ivy.multinomial( 36 num_samples + 1, # doesn't matter because `probs` is provided, but should be 37 # greater than the number of samples 38 num_samples, 39 probs=input, 40 replace=replacement, 41 out=out, 42 ) 43 44 45 @with_supported_dtypes( 46 { 47 "1.11.0 and below": ( 48 "float32", 49 "float64", 50 ) 51 }, 52 "torch", 53 ) 54 @to_ivy_arrays_and_back 55 def poisson(input, generator=None): 56 return ivy.poisson(input, shape=None) 57 58 59 @to_ivy_arrays_and_back 60 def rand( 61 size, 62 *, 63 generator=None, 64 out=None, 65 dtype=None, 66 layout=None, 67 device=None, 68 requires_grad=False, 69 pin_memory=False 70 ): 71 return ivy.random_uniform( 72 shape=size, 73 out=out, 74 dtype=dtype, 75 device=device, 76 ) 77 78 79 @to_ivy_arrays_and_back 80 def randn( 81 size, 82 *, 83 generator=None, 84 out=None, 85 dtype=None, 86 layout=None, 87 device=None, 88 requires_grad=False, 89 pin_memory=False 90 ): 91 return ivy.random_normal( 92 shape=size, 93 out=out, 94 dtype=dtype, 95 device=device, 96 ) 97 [end of ivy/functional/frontends/torch/random_sampling.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py --- a/ivy/functional/frontends/torch/random_sampling.py +++ b/ivy/functional/frontends/torch/random_sampling.py @@ -76,6 +76,27 @@ ) +@to_ivy_arrays_and_back +def rand_like( + input, + *, + dtype=None, + layout=None, + device=None, + requires_grad=False, + memory_format=False +): + shape = input.shape + if not dtype: + dtype = input.dtype + + return ivy.random_uniform( + shape=shape, + dtype=dtype, + device=device, + ) + + @to_ivy_arrays_and_back def randn( size,
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -76,6 +76,27 @@\n )\n \n \n+@to_ivy_arrays_and_back\n+def rand_like(\n+ input,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False,\n+ memory_format=False\n+):\n+ shape = input.shape\n+ if not dtype:\n+ dtype = input.dtype\n+\n+ return ivy.random_uniform(\n+ shape=shape,\n+ dtype=dtype,\n+ device=device,\n+ )\n+\n+\n @to_ivy_arrays_and_back\n def randn(\n size,\n", "issue": "rand_like\n\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]}
1,224
189
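The new frontend mirrors `torch.rand_like`: same shape as the input, the input's dtype unless one is passed, values drawn uniformly from [0, 1). A standalone sketch of the same logic against plain `ivy` (the function name is hypothetical; the real frontend additionally applies `@to_ivy_arrays_and_back` and accepts torch's unused `layout`/`requires_grad`/`memory_format` keywords):

```python
import ivy

def rand_like_reference(input, *, dtype=None, device=None):
    """Uniform [0, 1) samples with the input's shape and (default) dtype."""
    shape = input.shape
    if not dtype:
        dtype = input.dtype
    return ivy.random_uniform(shape=shape, dtype=dtype, device=device)
```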
gh_patches_debug_66902
rasdani/github-patches
git_diff
ivy-llc__ivy-17524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> argsort </issue> <code> [start of ivy/functional/frontends/paddle/tensor/search.py] 1 # global 2 import ivy 3 from ivy.func_wrapper import with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_supported_dtypes( 10 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")}, 11 "paddle", 12 ) 13 @to_ivy_arrays_and_back 14 def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None): 15 return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype) 16 17 18 @with_supported_dtypes( 19 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")}, 20 "paddle", 21 ) 22 @to_ivy_arrays_and_back 23 def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None): 24 return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype) 25 [end of ivy/functional/frontends/paddle/tensor/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py --- a/ivy/functional/frontends/paddle/tensor/search.py +++ b/ivy/functional/frontends/paddle/tensor/search.py @@ -22,3 +22,12 @@ @to_ivy_arrays_and_back def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None): return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype) + + +@with_supported_dtypes( + {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")}, + "paddle", +) +@to_ivy_arrays_and_back +def argsort(x, /, *, axis=-1, descending=False, name=None): + return ivy.argsort(x, axis=axis, descending=descending)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py\n--- a/ivy/functional/frontends/paddle/tensor/search.py\n+++ b/ivy/functional/frontends/paddle/tensor/search.py\n@@ -22,3 +22,12 @@\n @to_ivy_arrays_and_back\n def argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def argsort(x, /, *, axis=-1, descending=False, name=None):\n+ return ivy.argsort(x, axis=axis, descending=descending)\n", "issue": "argsort\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}]}
846
229
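`argsort` is a thin delegation: the frontend simply forwards to `ivy.argsort`, which returns the indices that would sort the array along `axis`. A quick worked call:

```python
import ivy

x = ivy.array([3.0, 1.0, 2.0])
idx = ivy.argsort(x, axis=-1, descending=False)
# idx -> [1, 2, 0]: x[1]=1.0 < x[2]=2.0 < x[0]=3.0
```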
gh_patches_debug_11382
rasdani/github-patches
git_diff
activeloopai__deeplake-683
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] ## 🐛🐛 Bug Report ### ⚗️ Current Behavior Hub's version info is present in two locations, `setup.py` and `hub/version.py`. As result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users do `hub --version`. ### 🧰 Possible Solution (optional) Remove version info from `setup.py`. </issue> <code> [start of setup.py] 1 """ 2 License: 3 This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. 4 If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 5 """ 6 7 import os 8 9 from setuptools import find_packages, setup 10 11 project = "hub" 12 VERSION = "1.3.0" 13 14 this_directory = os.path.abspath(os.path.dirname(__file__)) 15 with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f: 16 long_description = f.read() 17 18 with open(os.path.join(this_directory, "requirements.txt")) as f: 19 requirements = f.readlines() 20 21 setup( 22 name=project, 23 version=VERSION, 24 description="Activeloop Hub", 25 long_description=long_description, 26 long_description_content_type="text/markdown", 27 author="Snark AI Inc.", 28 author_email="[email protected]", 29 license="MPL 2.0", 30 url="https://github.com/activeloopai/Hub", 31 packages=find_packages(), 32 include_package_data=True, 33 zip_safe=False, 34 keywords="snark-hub", 35 project_urls={ 36 "Documentation": "https://docs.activeloop.ai/", 37 "Source": "https://github.com/activeloopai/Hub", 38 }, 39 classifiers=[ 40 "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", 41 "Programming Language :: Python :: 3", 42 "Programming Language :: Python :: 3.6", 43 "Programming Language :: Python :: 3.7", 44 "Programming Language :: Python :: 3.8", 45 "Programming Language :: Python :: 3.9", 46 "Programming Language :: Python :: 3 :: Only", 47 ], 48 python_requires=">=3.6", 49 install_requires=requirements, 50 setup_requires=[], 51 dependency_links=[], 52 entry_points={ 53 "console_scripts": [ 54 "activeloop = hub.cli.command:cli", 55 "activeloop-local = hub.cli.local:cli", 56 "activeloop-dev = hub.cli.dev:cli", 57 "hub = hub.cli.command:cli", 58 ] 59 }, 60 tests_require=["pytest", "mock>=1.0.1"], 61 ) 62 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -9,7 +9,6 @@ from setuptools import find_packages, setup project = "hub" -VERSION = "1.3.0" this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, "README.md")) as f: @@ -20,7 +19,6 @@ setup( name=project, - version=VERSION, description="Activeloop Hub", long_description=long_description, long_description_content_type="text/markdown",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,6 @@\n from setuptools import find_packages, setup\n \n project = \"hub\"\n-VERSION = \"1.3.0\"\n \n this_directory = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(this_directory, \"README.md\")) as f:\n@@ -20,7 +19,6 @@\n \n setup(\n name=project,\n- version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "[BUG]\n## \ud83d\udc1b\ud83d\udc1b Bug Report\r\n\r\n\r\n### \u2697\ufe0f Current Behavior\r\nHub's version info is present in two locations, `setup.py` and `hub/version.py`. As result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users do `hub --version`.\r\n\r\n### \ud83e\uddf0 Possible Solution (optional)\r\nRemove version info from `setup.py`.\n", "before_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nproject = \"hub\"\nVERSION = \"1.3.0\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nwith open(os.path.join(this_directory, \"requirements.txt\")) as f:\n requirements = f.readlines()\n\nsetup(\n name=project,\n version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Snark AI Inc.\",\n author_email=\"[email protected]\",\n license=\"MPL 2.0\",\n url=\"https://github.com/activeloopai/Hub\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n keywords=\"snark-hub\",\n project_urls={\n \"Documentation\": \"https://docs.activeloop.ai/\",\n \"Source\": \"https://github.com/activeloopai/Hub\",\n },\n classifiers=[\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n python_requires=\">=3.6\",\n install_requires=requirements,\n setup_requires=[],\n dependency_links=[],\n entry_points={\n \"console_scripts\": [\n \"activeloop = hub.cli.command:cli\",\n \"activeloop-local = hub.cli.local:cli\",\n \"activeloop-dev = hub.cli.dev:cli\",\n \"hub = hub.cli.command:cli\",\n ]\n },\n tests_require=[\"pytest\", \"mock>=1.0.1\"],\n)\n", "path": "setup.py"}]}
1,224
133
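The diff above only deletes the duplicate, leaving `hub/version.py` as the single source of truth. One conventional way to complete the pattern (assumed here, since it is not shown in the diff) is to have `setup.py` read that file:

```python
import os

# Assumption: hub/version.py defines __version__ (per the issue report).
here = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(here, "hub", "version.py")) as f:
    exec(f.read(), version_ns)

VERSION = version_ns["__version__"]  # then pass setup(..., version=VERSION)
```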
gh_patches_debug_63280
rasdani/github-patches
git_diff
pre-commit__pre-commit-1113
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rust hook requires `--path` attribute Cargo has changed how packages get installed and requires an extra `--path <destination>` attribute. Symptom: ``` [INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt. [INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default') Return code: 101 Expected return code: 0 Output: (none) Errors: error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package. ``` I guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87 Do we want to make pre-commit compatible with multiple versions of cargo or just the latest one? /cc @asottile @chriskuehl </issue> <code> [start of pre_commit/languages/rust.py] 1 from __future__ import unicode_literals 2 3 import contextlib 4 import os.path 5 6 import toml 7 8 import pre_commit.constants as C 9 from pre_commit.envcontext import envcontext 10 from pre_commit.envcontext import Var 11 from pre_commit.languages import helpers 12 from pre_commit.util import clean_path_on_failure 13 from pre_commit.util import cmd_output 14 15 16 ENVIRONMENT_DIR = 'rustenv' 17 get_default_version = helpers.basic_get_default_version 18 healthy = helpers.basic_healthy 19 20 21 def get_env_patch(target_dir): 22 return ( 23 ( 24 'PATH', 25 (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')), 26 ), 27 ) 28 29 30 @contextlib.contextmanager 31 def in_env(prefix): 32 target_dir = prefix.path( 33 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT), 34 ) 35 with envcontext(get_env_patch(target_dir)): 36 yield 37 38 39 def _add_dependencies(cargo_toml_path, additional_dependencies): 40 with open(cargo_toml_path, 'r+') as f: 41 cargo_toml = toml.load(f) 42 cargo_toml.setdefault('dependencies', {}) 43 for dep in additional_dependencies: 44 name, _, spec = dep.partition(':') 45 cargo_toml['dependencies'][name] = spec or '*' 46 f.seek(0) 47 toml.dump(cargo_toml, f) 48 f.truncate() 49 50 51 def install_environment(prefix, version, additional_dependencies): 52 helpers.assert_version_default('rust', version) 53 directory = prefix.path( 54 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT), 55 ) 56 57 # There are two cases where we might want to specify more dependencies: 58 # as dependencies for the library being built, and as binary packages 59 # to be `cargo install`'d. 60 # 61 # Unlike e.g. Python, if we just `cargo install` a library, it won't be 62 # used for compilation. And if we add a crate providing a binary to the 63 # `Cargo.toml`, the binary won't be built. 64 # 65 # Because of this, we allow specifying "cli" dependencies by prefixing 66 # with 'cli:'. 
67 cli_deps = { 68 dep for dep in additional_dependencies if dep.startswith('cli:') 69 } 70 lib_deps = set(additional_dependencies) - cli_deps 71 72 if len(lib_deps) > 0: 73 _add_dependencies(prefix.path('Cargo.toml'), lib_deps) 74 75 with clean_path_on_failure(directory): 76 packages_to_install = {()} 77 for cli_dep in cli_deps: 78 cli_dep = cli_dep[len('cli:'):] 79 package, _, version = cli_dep.partition(':') 80 if version != '': 81 packages_to_install.add((package, '--version', version)) 82 else: 83 packages_to_install.add((package,)) 84 85 for package in packages_to_install: 86 cmd_output( 87 'cargo', 'install', '--bins', '--root', directory, *package, 88 cwd=prefix.prefix_dir 89 ) 90 91 92 def run_hook(hook, file_args): 93 with in_env(hook.prefix): 94 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args) 95 [end of pre_commit/languages/rust.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py --- a/pre_commit/languages/rust.py +++ b/pre_commit/languages/rust.py @@ -73,7 +73,7 @@ _add_dependencies(prefix.path('Cargo.toml'), lib_deps) with clean_path_on_failure(directory): - packages_to_install = {()} + packages_to_install = {('--path', '.')} for cli_dep in cli_deps: cli_dep = cli_dep[len('cli:'):] package, _, version = cli_dep.partition(':')
{"golden_diff": "diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py\n--- a/pre_commit/languages/rust.py\n+++ b/pre_commit/languages/rust.py\n@@ -73,7 +73,7 @@\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n \n with clean_path_on_failure(directory):\n- packages_to_install = {()}\n+ packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n", "issue": "rust hook requires `--path` attribute\nCargo has changed how packages get installed and requires an extra `--path <destination>` attribute.\r\n\r\nSymptom:\r\n```\r\n[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')\r\nReturn code: 101\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.\r\n```\r\n\r\nI guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87\r\n\r\nDo we want to make pre-commit compatible with multiple versions of cargo or just the latest one?\r\n\r\n/cc @asottile @chriskuehl \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. 
And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {()}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}]}
1,768
126
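The fix swaps the empty tuple (a bare `cargo install` of the current directory, which newer cargo rejects) for an explicit `('--path', '.')`, so the install step becomes the equivalent of `cargo install --bins --root <envdir> --path .`. A sketch of the patched loop, reusing `cmd_output`, `cli_deps`, `directory`, and `prefix` from the record's `rust.py`:

```python
packages_to_install = {('--path', '.')}  # was {()} before the fix
for cli_dep in cli_deps:
    # cli:<package>[:<version>] entries still install named crates as before.
    package, _, version = cli_dep[len('cli:'):].partition(':')
    packages_to_install.add(
        (package, '--version', version) if version else (package,)
    )

for package in packages_to_install:
    cmd_output('cargo', 'install', '--bins', '--root', directory, *package,
               cwd=prefix.prefix_dir)
```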
gh_patches_debug_25965
rasdani/github-patches
git_diff
facebookresearch__fairseq-4808
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()` This is a no-op in eager and in ONNX export, but it's better for other tracers if this is preserved as shapes directly instead of converted to a tensor. There is a little annoying code duplication with `torch.jit.is_scripting()`, which is unforunately necessary because we didn't implement compile-time short circuiting correctly in TorchScript lol. </issue> <code> [start of fairseq/modules/sinusoidal_positional_embedding.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 import math 7 from typing import Any, Optional 8 9 import torch 10 import torch.onnx.operators 11 from fairseq import utils 12 from torch import Tensor, nn 13 14 15 class SinusoidalPositionalEmbedding(nn.Module): 16 """This module produces sinusoidal positional embeddings of any length. 17 18 Padding symbols are ignored. 19 """ 20 21 def __init__(self, embedding_dim, padding_idx, init_size=1024): 22 super().__init__() 23 self.embedding_dim = embedding_dim 24 self.padding_idx = padding_idx if padding_idx is not None else 0 25 self.weights = SinusoidalPositionalEmbedding.get_embedding( 26 init_size, embedding_dim, padding_idx 27 ) 28 self.onnx_trace = False 29 self.register_buffer("_float_tensor", torch.FloatTensor(1)) 30 self.max_positions = int(1e5) 31 32 def prepare_for_onnx_export_(self): 33 self.onnx_trace = True 34 35 @staticmethod 36 def get_embedding( 37 num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None 38 ): 39 """Build sinusoidal embeddings. 40 41 This matches the implementation in tensor2tensor, but differs slightly 42 from the description in Section 3.5 of "Attention Is All You Need". 
43 """ 44 half_dim = embedding_dim // 2 45 emb = math.log(10000) / (half_dim - 1) 46 emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) 47 emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 48 1 49 ) * emb.unsqueeze(0) 50 emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( 51 num_embeddings, -1 52 ) 53 if embedding_dim % 2 == 1: 54 # zero pad 55 emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) 56 if padding_idx is not None: 57 emb[padding_idx, :] = 0 58 return emb 59 60 def forward( 61 self, 62 input, 63 incremental_state: Optional[Any] = None, 64 timestep: Optional[Tensor] = None, 65 positions: Optional[Any] = None, 66 ): 67 """Input is expected to be of size [bsz x seqlen].""" 68 if torch.jit.is_scripting(): 69 bspair = torch.onnx.operators.shape_as_tensor(input) 70 elif torch.onnx.is_in_onnx_export(): 71 bspair = torch.onnx.operators.shape_as_tensor(input) 72 else: 73 bspair = input.size() 74 bsz, seq_len = bspair[0], bspair[1] 75 max_pos = self.padding_idx + 1 + seq_len 76 if self.weights is None or max_pos > self.weights.size(0): 77 # recompute/expand embeddings if needed 78 self.weights = SinusoidalPositionalEmbedding.get_embedding( 79 max_pos, self.embedding_dim, self.padding_idx 80 ) 81 self.weights = self.weights.to(self._float_tensor) 82 83 if incremental_state is not None: 84 # positions is the same for every token when decoding a single step 85 pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len 86 if self.onnx_trace: 87 return ( 88 self.weights.index_select(index=self.padding_idx + pos, dim=0) 89 .unsqueeze(1) 90 .repeat(bsz, 1, 1) 91 ) 92 return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) 93 94 positions = utils.make_positions( 95 input, self.padding_idx, onnx_trace=self.onnx_trace 96 ) 97 if self.onnx_trace: 98 flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) 99 embedding_shape = torch.cat( 100 (bsz, seq_len, torch.tensor([-1], dtype=torch.long)) 101 ) 102 embeddings = torch.onnx.operators.reshape_from_tensor_shape( 103 flat_embeddings, embedding_shape 104 ) 105 return embeddings 106 return ( 107 self.weights.index_select(0, positions.view(-1)) 108 .view(bsz, seq_len, -1) 109 .detach() 110 ) 111 [end of fairseq/modules/sinusoidal_positional_embedding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py --- a/fairseq/modules/sinusoidal_positional_embedding.py +++ b/fairseq/modules/sinusoidal_positional_embedding.py @@ -65,12 +65,7 @@ positions: Optional[Any] = None, ): """Input is expected to be of size [bsz x seqlen].""" - if torch.jit.is_scripting(): - bspair = torch.onnx.operators.shape_as_tensor(input) - elif torch.onnx.is_in_onnx_export(): - bspair = torch.onnx.operators.shape_as_tensor(input) - else: - bspair = input.size() + bspair = torch.onnx.operators.shape_as_tensor(input) bsz, seq_len = bspair[0], bspair[1] max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): @@ -97,7 +92,7 @@ if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( - (bsz, seq_len, torch.tensor([-1], dtype=torch.long)) + (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape
{"golden_diff": "diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py\n--- a/fairseq/modules/sinusoidal_positional_embedding.py\n+++ b/fairseq/modules/sinusoidal_positional_embedding.py\n@@ -65,12 +65,7 @@\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n- if torch.jit.is_scripting():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- elif torch.onnx.is_in_onnx_export():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- else:\n- bspair = input.size()\n+ bspair = torch.onnx.operators.shape_as_tensor(input)\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n@@ -97,7 +92,7 @@\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n- (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n+ (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n", "issue": "[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`\nThis is a no-op in eager and in ONNX export, but it's better for other\ntracers if this is preserved as shapes directly instead of converted to\na tensor.\n\nThere is a little annoying code duplication with\n`torch.jit.is_scripting()`, which is unforunately necessary because we\ndidn't implement compile-time short circuiting correctly in TorchScript\nlol.\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: Optional[Any] = None,\n timestep: Optional[Tensor] 
= None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n if torch.jit.is_scripting():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n elif torch.onnx.is_in_onnx_export():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n else:\n bspair = input.size()\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}]}
1,819
339
gh_patches_debug_1148
rasdani/github-patches
git_diff
PaddlePaddle__PaddleSpeech-19
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix some problems in the ctc beam search decoder - [x] Make character's index in FST starting from one, otherwise wrong decoding results would be produced especially when space is the first character in the vocabulary; - [x] Add version check in the setup script; - [x] Remove unused code. </issue> <code> [start of decoders/swig/setup.py] 1 """Script to build and install decoder package.""" 2 from __future__ import absolute_import 3 from __future__ import division 4 from __future__ import print_function 5 6 from setuptools import setup, Extension, distutils 7 import glob 8 import platform 9 import os, sys 10 import multiprocessing.pool 11 import argparse 12 13 parser = argparse.ArgumentParser(description=__doc__) 14 parser.add_argument( 15 "--num_processes", 16 default=1, 17 type=int, 18 help="Number of cpu processes to build package. (default: %(default)d)") 19 args = parser.parse_known_args() 20 21 # reconstruct sys.argv to pass to setup below 22 sys.argv = [sys.argv[0]] + args[1] 23 24 25 # monkey-patch for parallel compilation 26 # See: https://stackoverflow.com/a/13176803 27 def parallelCCompile(self, 28 sources, 29 output_dir=None, 30 macros=None, 31 include_dirs=None, 32 debug=0, 33 extra_preargs=None, 34 extra_postargs=None, 35 depends=None): 36 # those lines are copied from distutils.ccompiler.CCompiler directly 37 macros, objects, extra_postargs, pp_opts, build = self._setup_compile( 38 output_dir, macros, include_dirs, sources, depends, extra_postargs) 39 cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) 40 41 # parallel code 42 def _single_compile(obj): 43 try: 44 src, ext = build[obj] 45 except KeyError: 46 return 47 self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) 48 49 # convert to list, imap is evaluated on-demand 50 thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes) 51 list(thread_pool.imap(_single_compile, objects)) 52 return objects 53 54 55 def compile_test(header, library): 56 dummy_path = os.path.join(os.path.dirname(__file__), "dummy") 57 command = "bash -c \"g++ -include " + header \ 58 + " -l" + library + " -x c++ - <<<'int main() {}' -o " \ 59 + dummy_path + " >/dev/null 2>/dev/null && rm " \ 60 + dummy_path + " 2>/dev/null\"" 61 return os.system(command) == 0 62 63 64 # hack compile to support parallel compiling 65 distutils.ccompiler.CCompiler.compile = parallelCCompile 66 67 FILES = glob.glob('kenlm/util/*.cc') \ 68 + glob.glob('kenlm/lm/*.cc') \ 69 + glob.glob('kenlm/util/double-conversion/*.cc') 70 71 FILES += glob.glob('openfst-1.6.3/src/lib/*.cc') 72 73 FILES = [ 74 fn for fn in FILES 75 if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith( 76 'unittest.cc')) 77 ] 78 79 LIBS = ['stdc++'] 80 if platform.system() != 'Darwin': 81 LIBS.append('rt') 82 83 ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11'] 84 85 if compile_test('zlib.h', 'z'): 86 ARGS.append('-DHAVE_ZLIB') 87 LIBS.append('z') 88 89 if compile_test('bzlib.h', 'bz2'): 90 ARGS.append('-DHAVE_BZLIB') 91 LIBS.append('bz2') 92 93 if compile_test('lzma.h', 'lzma'): 94 ARGS.append('-DHAVE_XZLIB') 95 LIBS.append('lzma') 96 97 os.system('swig -python -c++ ./decoders.i') 98 99 decoders_module = [ 100 Extension( 101 name='_swig_decoders', 102 sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'), 103 language='c++', 104 include_dirs=[ 105 '.', 106 'kenlm', 107 'openfst-1.6.3/src/include', 108 'ThreadPool', 109 ], 110 
libraries=LIBS, 111 extra_compile_args=ARGS) 112 ] 113 114 setup( 115 name='swig_decoders', 116 version='1.0', 117 description="""CTC decoders""", 118 ext_modules=decoders_module, 119 py_modules=['swig_decoders'], ) 120 [end of decoders/swig/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py --- a/decoders/swig/setup.py +++ b/decoders/swig/setup.py @@ -113,7 +113,7 @@ setup( name='swig_decoders', - version='1.0', + version='1.1', description="""CTC decoders""", ext_modules=decoders_module, py_modules=['swig_decoders'], )
{"golden_diff": "diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py\n--- a/decoders/swig/setup.py\n+++ b/decoders/swig/setup.py\n@@ -113,7 +113,7 @@\n \n setup(\n name='swig_decoders',\n- version='1.0',\n+ version='1.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "issue": "Fix some problems in the ctc beam search decoder\n- [x] Make character's index in FST starting from one, otherwise wrong decoding results would be produced especially when space is the first character in the vocabulary;\r\n- [x] Add version check in the setup script;\r\n- [x] Remove unused code. \r\n\n", "before_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + 
glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "decoders/swig/setup.py"}]}
1,747
108
gh_patches_debug_30084
rasdani/github-patches
git_diff
cobbler__cobbler-3264
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Task logs don't end ### Describe the bug Task logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files. ### Steps to reproduce 1. `cobbler mkloaders` (or any other task) 2. Repeat step 1 3. See logs being appended to completed tasks ### Expected behavior The files should only contain the logs for the task. ### Cobbler version Commit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae ````paste below Cobbler 3.4.0 source: ?, ? build time: Fri Oct 14 14:17:09 2022 ```` ### Operating system ``` e62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release NAME="openSUSE Leap" VERSION="15.3" ID="opensuse-leap" ID_LIKE="suse opensuse" VERSION_ID="15.3" PRETTY_NAME="openSUSE Leap 15.3" ANSI_COLOR="0;32" CPE_NAME="cpe:/o:opensuse:leap:15.3" BUG_REPORT_URL="https://bugs.opensuse.org" HOME_URL="https://www.opensuse.org/" ``` ### Cobbler log Not relevant ### Screenshots Not relevant ### Additional information Found by accident during another debugging session </issue> <code> [start of cobbler/utils/thread.py] 1 """ 2 This module is responsible for managing the custom common threading logic Cobbler has. 3 """ 4 5 import logging 6 import pathlib 7 from threading import Thread 8 from typing import Callable 9 10 from cobbler import enums 11 from cobbler import utils 12 13 14 class CobblerThread(Thread): 15 """ 16 This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers. 17 """ 18 19 def __init__( 20 self, 21 event_id: str, 22 remote, 23 options: dict, 24 task_name: str, 25 api, 26 run: Callable, 27 on_done: Callable = None, 28 ): 29 """ 30 This constructor creates a Cobbler thread which then may be run by calling ``run()``. 31 32 :param event_id: The event-id which is associated with this thread. Also used as thread name 33 :param remote: The Cobbler remote object to execute actions with. 34 :param options: Additional options which can be passed into the Thread. 35 :param task_name: The high level task name which is used to trigger pre- and post-task triggers 36 :param api: The Cobbler api object to resolve information with. 37 :param run: The callable that is going to be executed with this thread. 38 :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers. 39 """ 40 super().__init__(name=event_id) 41 self.event_id = event_id 42 self.remote = remote 43 self.logger = logging.getLogger() 44 self.__setup_logger() 45 self._run = run 46 self.on_done = on_done 47 if options is None: 48 options = {} 49 self.options = options 50 self.task_name = task_name 51 self.api = api 52 53 def __setup_logger(self): 54 """ 55 Utility function that will set up the Python logger for the tasks in a special directory. 56 """ 57 filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log" 58 task_log_handler = logging.FileHandler(str(filename), encoding="utf-8") 59 task_log_formatter = logging.Formatter( 60 "[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s" 61 ) 62 task_log_handler.setFormatter(task_log_formatter) 63 self.logger.setLevel(logging.INFO) 64 self.logger.addHandler(task_log_handler) 65 66 def _set_task_state(self, new_state: enums.EventStatus): 67 """ 68 Set the state of the task. (For internal use only) 69 70 :param new_state: The new state of the task. 
71 """ 72 if not isinstance(new_state, enums.EventStatus): 73 raise TypeError('"new_state" needs to be of type enums.EventStatus!') 74 if self.event_id not in self.remote.events: 75 raise ValueError('"event_id" not existing!') 76 self.remote.events[self.event_id].state = new_state 77 # clear the list of who has read it 78 self.remote.events[self.event_id].read_by_who = [] 79 if new_state == enums.EventStatus.COMPLETE: 80 self.logger.info("### TASK COMPLETE ###") 81 elif new_state == enums.EventStatus.FAILED: 82 self.logger.error("### TASK FAILED ###") 83 84 def run(self): 85 """ 86 Run the thread. 87 88 :return: The return code of the action. This may a boolean or a Linux return code. 89 """ 90 self.logger.info("start_task(%s); event_id(%s)", self.task_name, self.event_id) 91 try: 92 if utils.run_triggers( 93 api=self.api, 94 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*", 95 additional=self.options, 96 ): 97 self._set_task_state(enums.EventStatus.FAILED) 98 return False 99 rc = self._run(self) 100 if rc is not None and not rc: 101 self._set_task_state(enums.EventStatus.FAILED) 102 else: 103 self._set_task_state(enums.EventStatus.COMPLETE) 104 if self.on_done is not None: 105 self.on_done() 106 utils.run_triggers( 107 api=self.api, 108 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/post/*", 109 additional=self.options, 110 ) 111 return rc 112 except Exception: 113 utils.log_exc() 114 self._set_task_state(enums.EventStatus.FAILED) 115 return False 116 [end of cobbler/utils/thread.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py --- a/cobbler/utils/thread.py +++ b/cobbler/utils/thread.py @@ -41,6 +41,7 @@ self.event_id = event_id self.remote = remote self.logger = logging.getLogger() + self.__task_log_handler = None self.__setup_logger() self._run = run self.on_done = on_done @@ -55,13 +56,13 @@ Utility function that will set up the Python logger for the tasks in a special directory. """ filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log" - task_log_handler = logging.FileHandler(str(filename), encoding="utf-8") + self.__task_log_handler = logging.FileHandler(str(filename), encoding="utf-8") task_log_formatter = logging.Formatter( "[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s" ) - task_log_handler.setFormatter(task_log_formatter) + self.__task_log_handler.setFormatter(task_log_formatter) self.logger.setLevel(logging.INFO) - self.logger.addHandler(task_log_handler) + self.logger.addHandler(self.__task_log_handler) def _set_task_state(self, new_state: enums.EventStatus): """ @@ -113,3 +114,5 @@ utils.log_exc() self._set_task_state(enums.EventStatus.FAILED) return False + finally: + self.logger.removeHandler(self.__task_log_handler)
{"golden_diff": "diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py\n--- a/cobbler/utils/thread.py\n+++ b/cobbler/utils/thread.py\n@@ -41,6 +41,7 @@\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n+ self.__task_log_handler = None\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n@@ -55,13 +56,13 @@\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n- task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n+ self.__task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n- task_log_handler.setFormatter(task_log_formatter)\n+ self.__task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n- self.logger.addHandler(task_log_handler)\n+ self.logger.addHandler(self.__task_log_handler)\n \n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n@@ -113,3 +114,5 @@\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n+ finally:\n+ self.logger.removeHandler(self.__task_log_handler)\n", "issue": "Task logs don't end\n### Describe the bug\r\n\r\nTask logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files.\r\n\r\n### Steps to reproduce\r\n\r\n1. `cobbler mkloaders` (or any other task)\r\n2. Repeat step 1\r\n3. See logs being appended to completed tasks\r\n\r\n### Expected behavior\r\n\r\nThe files should only contain the logs for the task.\r\n\r\n### Cobbler version\r\n\r\nCommit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae\r\n\r\n````paste below\r\nCobbler 3.4.0\r\n source: ?, ?\r\n build time: Fri Oct 14 14:17:09 2022\r\n````\r\n\r\n### Operating system\r\n\r\n```\r\ne62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release \r\nNAME=\"openSUSE Leap\"\r\nVERSION=\"15.3\"\r\nID=\"opensuse-leap\"\r\nID_LIKE=\"suse opensuse\"\r\nVERSION_ID=\"15.3\"\r\nPRETTY_NAME=\"openSUSE Leap 15.3\"\r\nANSI_COLOR=\"0;32\"\r\nCPE_NAME=\"cpe:/o:opensuse:leap:15.3\"\r\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\r\nHOME_URL=\"https://www.opensuse.org/\"\r\n```\r\n\r\n### Cobbler log\r\n\r\nNot relevant\r\n\r\n### Screenshots\r\n\r\nNot relevant\r\n\r\n### Additional information\r\n\r\nFound by accident during another debugging session\r\n\n", "before_files": [{"content": "\"\"\"\nThis module is responsible for managing the custom common threading logic Cobbler has.\n\"\"\"\n\nimport logging\nimport pathlib\nfrom threading import Thread\nfrom typing import Callable\n\nfrom cobbler import enums\nfrom cobbler import utils\n\n\nclass CobblerThread(Thread):\n \"\"\"\n This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.\n \"\"\"\n\n def __init__(\n self,\n event_id: str,\n remote,\n options: dict,\n task_name: str,\n api,\n run: Callable,\n on_done: Callable = None,\n ):\n \"\"\"\n This constructor creates a Cobbler thread which then may be run by calling ``run()``.\n\n :param event_id: The event-id which is associated with this thread. 
Also used as thread name\n :param remote: The Cobbler remote object to execute actions with.\n :param options: Additional options which can be passed into the Thread.\n :param task_name: The high level task name which is used to trigger pre- and post-task triggers\n :param api: The Cobbler api object to resolve information with.\n :param run: The callable that is going to be executed with this thread.\n :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.\n \"\"\"\n super().__init__(name=event_id)\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n if options is None:\n options = {}\n self.options = options\n self.task_name = task_name\n self.api = api\n\n def __setup_logger(self):\n \"\"\"\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(task_log_handler)\n\n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n Set the state of the task. (For internal use only)\n\n :param new_state: The new state of the task.\n \"\"\"\n if not isinstance(new_state, enums.EventStatus):\n raise TypeError('\"new_state\" needs to be of type enums.EventStatus!')\n if self.event_id not in self.remote.events:\n raise ValueError('\"event_id\" not existing!')\n self.remote.events[self.event_id].state = new_state\n # clear the list of who has read it\n self.remote.events[self.event_id].read_by_who = []\n if new_state == enums.EventStatus.COMPLETE:\n self.logger.info(\"### TASK COMPLETE ###\")\n elif new_state == enums.EventStatus.FAILED:\n self.logger.error(\"### TASK FAILED ###\")\n\n def run(self):\n \"\"\"\n Run the thread.\n\n :return: The return code of the action. This may a boolean or a Linux return code.\n \"\"\"\n self.logger.info(\"start_task(%s); event_id(%s)\", self.task_name, self.event_id)\n try:\n if utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*\",\n additional=self.options,\n ):\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n rc = self._run(self)\n if rc is not None and not rc:\n self._set_task_state(enums.EventStatus.FAILED)\n else:\n self._set_task_state(enums.EventStatus.COMPLETE)\n if self.on_done is not None:\n self.on_done()\n utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/post/*\",\n additional=self.options,\n )\n return rc\n except Exception:\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n", "path": "cobbler/utils/thread.py"}]}
2,041
346
gh_patches_debug_14660
rasdani/github-patches
git_diff
lhotse-speech__lhotse-103
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> A few questions... I came across a few confusions while I was reading the code in order to write an example. It would be helpful for me if they are clarified ( I may have missed something). https://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68 why `duration - start` rather than just `duration`? https://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178 why not `[n_sources, n_channels, n_samples]`? Thanks! </issue> <code> [start of lhotse/kaldi.py] 1 from collections import defaultdict 2 from pathlib import Path 3 from typing import Dict, Optional, Tuple 4 5 from lhotse.audio import AudioSource, Recording, RecordingSet 6 from lhotse.supervision import SupervisionSegment, SupervisionSet 7 from lhotse.utils import Pathlike 8 9 10 def load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]: 11 """ 12 Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests. 13 For this to work, at least the wav.scp file must exist. 14 SupervisionSet is created only when a segments file exists. 15 All the other files (text, utt2spk, etc.) are optional, and some of them might not be handled yet. 16 In particular, feats.scp files are ignored. 17 """ 18 path = Path(path) 19 assert path.is_dir() 20 21 # must exist for RecordingSet 22 recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True) 23 24 durations = defaultdict(float) 25 reco2dur = path / 'reco2dur' 26 if not reco2dur.is_file(): 27 raise ValueError(f"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>") 28 with reco2dur.open() as f: 29 for line in f: 30 recording_id, dur = line.strip().split() 31 durations[recording_id] = float(dur) 32 33 audio_set = RecordingSet.from_recordings( 34 Recording( 35 id=recording_id, 36 sources=[ 37 AudioSource( 38 type='command' if path_or_cmd.endswith('|') else 'file', 39 channels=[0], 40 source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd 41 ) 42 ], 43 sampling_rate=sampling_rate, 44 num_samples=int(durations[recording_id] * sampling_rate), 45 duration=durations[recording_id] 46 ) 47 for recording_id, path_or_cmd in recordings.items() 48 ) 49 50 # must exist for SupervisionSet 51 segments = path / 'segments' 52 if not segments.is_file(): 53 return audio_set, None 54 55 with segments.open() as f: 56 supervision_segments = [l.strip().split() for l in f] 57 58 texts = load_kaldi_text_mapping(path / 'text') 59 speakers = load_kaldi_text_mapping(path / 'utt2spk') 60 genders = load_kaldi_text_mapping(path / 'spk2gender') 61 languages = load_kaldi_text_mapping(path / 'utt2lang') 62 63 supervision_set = SupervisionSet.from_segments( 64 SupervisionSegment( 65 id=segment_id, 66 recording_id=recording_id, 67 start=float(start), 68 duration=float(duration) - float(start), 69 channel=0, 70 text=texts[segment_id], 71 language=languages[segment_id], 72 speaker=speakers[segment_id], 73 gender=genders[speakers[segment_id]] 74 ) 75 for segment_id, recording_id, start, duration in supervision_segments 76 ) 77 78 return audio_set, supervision_set 79 80 81 def load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]: 82 """Load Kaldi files such as utt2spk, spk2gender, text, etc. 
as a dict.""" 83 mapping = defaultdict(lambda: None) 84 if path.is_file(): 85 with path.open() as f: 86 mapping = dict(line.strip().split(' ', maxsplit=1) for line in f) 87 elif must_exist: 88 raise ValueError(f"No such file: {path}") 89 return mapping 90 [end of lhotse/kaldi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py --- a/lhotse/kaldi.py +++ b/lhotse/kaldi.py @@ -65,14 +65,14 @@ id=segment_id, recording_id=recording_id, start=float(start), - duration=float(duration) - float(start), + duration=float(end) - float(start), channel=0, text=texts[segment_id], language=languages[segment_id], speaker=speakers[segment_id], gender=genders[speakers[segment_id]] ) - for segment_id, recording_id, start, duration in supervision_segments + for segment_id, recording_id, start, end in supervision_segments ) return audio_set, supervision_set
{"golden_diff": "diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py\n--- a/lhotse/kaldi.py\n+++ b/lhotse/kaldi.py\n@@ -65,14 +65,14 @@\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n- duration=float(duration) - float(start),\n+ duration=float(end) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n- for segment_id, recording_id, start, duration in supervision_segments\n+ for segment_id, recording_id, start, end in supervision_segments\n )\n \n return audio_set, supervision_set\n", "issue": "A few questions...\nI came across a few confusions while I was reading the code in order to write an example. It would be helpful for me if they are clarified ( I may have missed something).\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68\r\nwhy `duration - start` rather than just `duration`?\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178\r\nwhy not `[n_sources, n_channels, n_samples]`?\r\n\r\nThanks!\n", "before_files": [{"content": "from collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional, Tuple\n\nfrom lhotse.audio import AudioSource, Recording, RecordingSet\nfrom lhotse.supervision import SupervisionSegment, SupervisionSet\nfrom lhotse.utils import Pathlike\n\n\ndef load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:\n \"\"\"\n Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.\n For this to work, at least the wav.scp file must exist.\n SupervisionSet is created only when a segments file exists.\n All the other files (text, utt2spk, etc.) 
are optional, and some of them might not be handled yet.\n In particular, feats.scp files are ignored.\n \"\"\"\n path = Path(path)\n assert path.is_dir()\n\n # must exist for RecordingSet\n recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)\n\n durations = defaultdict(float)\n reco2dur = path / 'reco2dur'\n if not reco2dur.is_file():\n raise ValueError(f\"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>\")\n with reco2dur.open() as f:\n for line in f:\n recording_id, dur = line.strip().split()\n durations[recording_id] = float(dur)\n\n audio_set = RecordingSet.from_recordings(\n Recording(\n id=recording_id,\n sources=[\n AudioSource(\n type='command' if path_or_cmd.endswith('|') else 'file',\n channels=[0],\n source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd\n )\n ],\n sampling_rate=sampling_rate,\n num_samples=int(durations[recording_id] * sampling_rate),\n duration=durations[recording_id]\n )\n for recording_id, path_or_cmd in recordings.items()\n )\n\n # must exist for SupervisionSet\n segments = path / 'segments'\n if not segments.is_file():\n return audio_set, None\n\n with segments.open() as f:\n supervision_segments = [l.strip().split() for l in f]\n\n texts = load_kaldi_text_mapping(path / 'text')\n speakers = load_kaldi_text_mapping(path / 'utt2spk')\n genders = load_kaldi_text_mapping(path / 'spk2gender')\n languages = load_kaldi_text_mapping(path / 'utt2lang')\n\n supervision_set = SupervisionSet.from_segments(\n SupervisionSegment(\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n duration=float(duration) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n for segment_id, recording_id, start, duration in supervision_segments\n )\n\n return audio_set, supervision_set\n\n\ndef load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:\n \"\"\"Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict.\"\"\"\n mapping = defaultdict(lambda: None)\n if path.is_file():\n with path.open() as f:\n mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)\n elif must_exist:\n raise ValueError(f\"No such file: {path}\")\n return mapping\n", "path": "lhotse/kaldi.py"}]}
1,621
176
gh_patches_debug_8194
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-6402
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve regex for semver in automation rules We have a very basic pattern, but on the semver faq they have a better one https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string This needs to be changed and with a comment to the link from where it came from. https://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113 </issue> <code> [start of readthedocs/builds/constants.py] 1 """Constants for the builds app.""" 2 3 from django.conf import settings 4 from django.utils.translation import ugettext_lazy as _ 5 6 7 BUILD_STATE_TRIGGERED = 'triggered' 8 BUILD_STATE_CLONING = 'cloning' 9 BUILD_STATE_INSTALLING = 'installing' 10 BUILD_STATE_BUILDING = 'building' 11 BUILD_STATE_FINISHED = 'finished' 12 13 BUILD_STATE = ( 14 (BUILD_STATE_TRIGGERED, _('Triggered')), 15 (BUILD_STATE_CLONING, _('Cloning')), 16 (BUILD_STATE_INSTALLING, _('Installing')), 17 (BUILD_STATE_BUILDING, _('Building')), 18 (BUILD_STATE_FINISHED, _('Finished')), 19 ) 20 21 BUILD_TYPES = ( 22 ('html', _('HTML')), 23 ('pdf', _('PDF')), 24 ('epub', _('Epub')), 25 # There is currently no support for building man/dash formats, but we keep 26 # it there since the DB might still contain those values for legacy 27 # projects. 28 ('man', _('Manpage')), 29 ('dash', _('Dash')), 30 ) 31 32 # Manager name for Internal Versions or Builds. 33 # ie: Versions and Builds Excluding pull request/merge request Versions and Builds. 34 INTERNAL = 'internal' 35 # Manager name for External Versions or Builds. 36 # ie: Only pull request/merge request Versions and Builds. 37 EXTERNAL = 'external' 38 EXTERNAL_TEXT = _('External') 39 40 BRANCH = 'branch' 41 BRANCH_TEXT = _('Branch') 42 TAG = 'tag' 43 TAG_TEXT = _('Tag') 44 UNKNOWN = 'unknown' 45 UNKNOWN_TEXT = _('Unknown') 46 47 VERSION_TYPES = ( 48 (BRANCH, BRANCH_TEXT), 49 (TAG, TAG_TEXT), 50 (EXTERNAL, EXTERNAL_TEXT), 51 (UNKNOWN, UNKNOWN_TEXT), 52 ) 53 54 LATEST = settings.RTD_LATEST 55 LATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME 56 57 STABLE = settings.RTD_STABLE 58 STABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME 59 60 # Those names are specialcased version names. They do not correspond to 61 # branches/tags in a project's repository. 
62 NON_REPOSITORY_VERSIONS = ( 63 LATEST, 64 STABLE, 65 ) 66 67 # General Build Statuses 68 BUILD_STATUS_FAILURE = 'failed' 69 BUILD_STATUS_PENDING = 'pending' 70 BUILD_STATUS_SUCCESS = 'success' 71 72 # GitHub Build Statuses 73 GITHUB_BUILD_STATUS_FAILURE = 'failure' 74 GITHUB_BUILD_STATUS_PENDING = 'pending' 75 GITHUB_BUILD_STATUS_SUCCESS = 'success' 76 77 # GitLab Build Statuses 78 GITLAB_BUILD_STATUS_FAILURE = 'failed' 79 GITLAB_BUILD_STATUS_PENDING = 'pending' 80 GITLAB_BUILD_STATUS_SUCCESS = 'success' 81 82 # Used to select correct Build status and description to be sent to each service API 83 SELECT_BUILD_STATUS = { 84 BUILD_STATUS_FAILURE: { 85 'github': GITHUB_BUILD_STATUS_FAILURE, 86 'gitlab': GITLAB_BUILD_STATUS_FAILURE, 87 'description': 'Read the Docs build failed!', 88 }, 89 BUILD_STATUS_PENDING: { 90 'github': GITHUB_BUILD_STATUS_PENDING, 91 'gitlab': GITLAB_BUILD_STATUS_PENDING, 92 'description': 'Read the Docs build is in progress!', 93 }, 94 BUILD_STATUS_SUCCESS: { 95 'github': GITHUB_BUILD_STATUS_SUCCESS, 96 'gitlab': GITLAB_BUILD_STATUS_SUCCESS, 97 'description': 'Read the Docs build succeeded!', 98 }, 99 } 100 101 RTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs' 102 103 GITHUB_EXTERNAL_VERSION_NAME = 'Pull Request' 104 GITLAB_EXTERNAL_VERSION_NAME = 'Merge Request' 105 GENERIC_EXTERNAL_VERSION_NAME = 'External Version' 106 107 108 # Automation rules 109 110 ALL_VERSIONS = 'all-versions' 111 ALL_VERSIONS_REGEX = r'.*' 112 SEMVER_VERSIONS = 'semver-versions' 113 SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$' 114 115 116 PREDEFINED_MATCH_ARGS = ( 117 (ALL_VERSIONS, _('Any version')), 118 (SEMVER_VERSIONS, _('SemVer versions')), 119 (None, _('Custom match')), 120 ) 121 122 PREDEFINED_MATCH_ARGS_VALUES = { 123 ALL_VERSIONS: ALL_VERSIONS_REGEX, 124 SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX, 125 } 126 [end of readthedocs/builds/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py --- a/readthedocs/builds/constants.py +++ b/readthedocs/builds/constants.py @@ -110,7 +110,10 @@ ALL_VERSIONS = 'all-versions' ALL_VERSIONS_REGEX = r'.*' SEMVER_VERSIONS = 'semver-versions' -SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$' + +# Pattern referred from +# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string +SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$' # noqa PREDEFINED_MATCH_ARGS = (
{"golden_diff": "diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py\n--- a/readthedocs/builds/constants.py\n+++ b/readthedocs/builds/constants.py\n@@ -110,7 +110,10 @@\n ALL_VERSIONS = 'all-versions'\n ALL_VERSIONS_REGEX = r'.*'\n SEMVER_VERSIONS = 'semver-versions'\n-SEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n+\n+# Pattern referred from\n+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\n+SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$' # noqa\n \n \n PREDEFINED_MATCH_ARGS = (\n", "issue": "Improve regex for semver in automation rules\nWe have a very basic pattern, but on the semver faq they have a better one\r\n\r\nhttps://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\r\n\r\nThis needs to be changed and with a comment to the link from where it came from.\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113\n", "before_files": [{"content": "\"\"\"Constants for the builds app.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nBUILD_STATE_TRIGGERED = 'triggered'\nBUILD_STATE_CLONING = 'cloning'\nBUILD_STATE_INSTALLING = 'installing'\nBUILD_STATE_BUILDING = 'building'\nBUILD_STATE_FINISHED = 'finished'\n\nBUILD_STATE = (\n (BUILD_STATE_TRIGGERED, _('Triggered')),\n (BUILD_STATE_CLONING, _('Cloning')),\n (BUILD_STATE_INSTALLING, _('Installing')),\n (BUILD_STATE_BUILDING, _('Building')),\n (BUILD_STATE_FINISHED, _('Finished')),\n)\n\nBUILD_TYPES = (\n ('html', _('HTML')),\n ('pdf', _('PDF')),\n ('epub', _('Epub')),\n # There is currently no support for building man/dash formats, but we keep\n # it there since the DB might still contain those values for legacy\n # projects.\n ('man', _('Manpage')),\n ('dash', _('Dash')),\n)\n\n# Manager name for Internal Versions or Builds.\n# ie: Versions and Builds Excluding pull request/merge request Versions and Builds.\nINTERNAL = 'internal'\n# Manager name for External Versions or Builds.\n# ie: Only pull request/merge request Versions and Builds.\nEXTERNAL = 'external'\nEXTERNAL_TEXT = _('External')\n\nBRANCH = 'branch'\nBRANCH_TEXT = _('Branch')\nTAG = 'tag'\nTAG_TEXT = _('Tag')\nUNKNOWN = 'unknown'\nUNKNOWN_TEXT = _('Unknown')\n\nVERSION_TYPES = (\n (BRANCH, BRANCH_TEXT),\n (TAG, TAG_TEXT),\n (EXTERNAL, EXTERNAL_TEXT),\n (UNKNOWN, UNKNOWN_TEXT),\n)\n\nLATEST = settings.RTD_LATEST\nLATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME\n\nSTABLE = settings.RTD_STABLE\nSTABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME\n\n# Those names are specialcased version names. 
They do not correspond to\n# branches/tags in a project's repository.\nNON_REPOSITORY_VERSIONS = (\n LATEST,\n STABLE,\n)\n\n# General Build Statuses\nBUILD_STATUS_FAILURE = 'failed'\nBUILD_STATUS_PENDING = 'pending'\nBUILD_STATUS_SUCCESS = 'success'\n\n# GitHub Build Statuses\nGITHUB_BUILD_STATUS_FAILURE = 'failure'\nGITHUB_BUILD_STATUS_PENDING = 'pending'\nGITHUB_BUILD_STATUS_SUCCESS = 'success'\n\n# GitLab Build Statuses\nGITLAB_BUILD_STATUS_FAILURE = 'failed'\nGITLAB_BUILD_STATUS_PENDING = 'pending'\nGITLAB_BUILD_STATUS_SUCCESS = 'success'\n\n# Used to select correct Build status and description to be sent to each service API\nSELECT_BUILD_STATUS = {\n BUILD_STATUS_FAILURE: {\n 'github': GITHUB_BUILD_STATUS_FAILURE,\n 'gitlab': GITLAB_BUILD_STATUS_FAILURE,\n 'description': 'Read the Docs build failed!',\n },\n BUILD_STATUS_PENDING: {\n 'github': GITHUB_BUILD_STATUS_PENDING,\n 'gitlab': GITLAB_BUILD_STATUS_PENDING,\n 'description': 'Read the Docs build is in progress!',\n },\n BUILD_STATUS_SUCCESS: {\n 'github': GITHUB_BUILD_STATUS_SUCCESS,\n 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,\n 'description': 'Read the Docs build succeeded!',\n },\n}\n\nRTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'\n\nGITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'\nGITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'\nGENERIC_EXTERNAL_VERSION_NAME = 'External Version'\n\n\n# Automation rules\n\nALL_VERSIONS = 'all-versions'\nALL_VERSIONS_REGEX = r'.*'\nSEMVER_VERSIONS = 'semver-versions'\nSEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n\n\nPREDEFINED_MATCH_ARGS = (\n (ALL_VERSIONS, _('Any version')),\n (SEMVER_VERSIONS, _('SemVer versions')),\n (None, _('Custom match')),\n)\n\nPREDEFINED_MATCH_ARGS_VALUES = {\n ALL_VERSIONS: ALL_VERSIONS_REGEX,\n SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,\n}\n", "path": "readthedocs/builds/constants.py"}]}
1,809
282
gh_patches_debug_56402
rasdani/github-patches
git_diff
encode__httpx-361
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing py.typed declaration? `mypy` is complaining about not being able to find type annotations for `httpx`: `error: Cannot find module named 'httpx'` I'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import re 5 from pathlib import Path 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 version = Path(package, "__version__.py").read_text() 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1) 16 17 18 def get_long_description(): 19 """ 20 Return the README. 21 """ 22 long_description = "" 23 with open("README.md", encoding="utf8") as f: 24 long_description += f.read() 25 long_description += "\n\n" 26 with open("CHANGELOG.md", encoding="utf8") as f: 27 long_description += f.read() 28 return long_description 29 30 31 def get_packages(package): 32 """ 33 Return root package and all sub-packages. 34 """ 35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")] 36 37 38 setup( 39 name="httpx", 40 python_requires=">=3.6", 41 version=get_version("httpx"), 42 url="https://github.com/encode/httpx", 43 license="BSD", 44 description="The next generation HTTP client.", 45 long_description=get_long_description(), 46 long_description_content_type="text/markdown", 47 author="Tom Christie", 48 author_email="[email protected]", 49 package_data={"httpx": ["py.typed"]}, 50 packages=get_packages("httpx"), 51 include_package_data=True, 52 install_requires=[ 53 "certifi", 54 "chardet==3.*", 55 "h11==0.8.*", 56 "h2==3.*", 57 "hstspreload>=2019.8.27", 58 "idna==2.*", 59 "rfc3986==1.*", 60 ], 61 classifiers=[ 62 "Development Status :: 3 - Alpha", 63 "Environment :: Web Environment", 64 "Intended Audience :: Developers", 65 "License :: OSI Approved :: BSD License", 66 "Operating System :: OS Independent", 67 "Topic :: Internet :: WWW/HTTP", 68 "Programming Language :: Python :: 3", 69 "Programming Language :: Python :: 3.6", 70 "Programming Language :: Python :: 3.7", 71 "Programming Language :: Python :: 3.8", 72 ], 73 ) 74 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ package_data={"httpx": ["py.typed"]}, packages=get_packages("httpx"), include_package_data=True, + zip_safe=False, install_requires=[ "certifi", "chardet==3.*",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,6 +49,7 @@\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n+ zip_safe=False,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n", "issue": "Missing py.typed declaration?\n`mypy` is complaining about not being able to find type annotations for `httpx`: \r\n\r\n`error: Cannot find module named 'httpx'`\r\n\r\nI'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload>=2019.8.27\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
1,279
84
gh_patches_debug_23
rasdani/github-patches
git_diff
horovod__horovod-3745
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No module named 'packaging' when install horovod It seems that the horovod v0.26.0 has some dependency problems. How long does it take for a new patch version to be released or should I pin the horovod version? ^_^ ![image](https://user-images.githubusercontent.com/32220263/195746141-2a0050f5-1eaf-4f7b-9a62-50fd6b13f2ff.png) </issue> <code> [start of horovod/__init__.py] 1 from horovod.runner import run 2 3 __version__ = '0.26.0' 4 [end of horovod/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/horovod/__init__.py b/horovod/__init__.py --- a/horovod/__init__.py +++ b/horovod/__init__.py @@ -1,3 +1,3 @@ from horovod.runner import run -__version__ = '0.26.0' +__version__ = '0.26.1'
{"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1,3 +1,3 @@\n from horovod.runner import run\n \n-__version__ = '0.26.0'\n+__version__ = '0.26.1'\n", "issue": "No module named 'packaging' when install horovod\nIt seems that the horovod v0.26.0 has some dependency problems.\r\n\r\nHow long does it take for a new patch version to be released or should I pin the horovod version? ^_^\r\n\r\n![image](https://user-images.githubusercontent.com/32220263/195746141-2a0050f5-1eaf-4f7b-9a62-50fd6b13f2ff.png)\r\n\n", "before_files": [{"content": "from horovod.runner import run\n\n__version__ = '0.26.0'\n", "path": "horovod/__init__.py"}]}
689
89
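Context for the record above: the reported failure is an install-time `ModuleNotFoundError` for `packaging`, resolved upstream by the 0.26.1 release. A generic sketch of that failure mode and a defensive guard; the guarded import and error text are illustrative, not horovod's actual implementation:

```python
# Install-time code that imports a third-party module must declare it as a
# build/install requirement, or fail with an actionable message instead of a
# bare ModuleNotFoundError.
try:
    from packaging.version import Version
except ImportError as exc:
    raise ImportError(
        "the 'packaging' distribution must be listed as a build/install "
        "requirement before it can be imported at setup time"
    ) from exc

assert Version("0.26.1") > Version("0.26.0")
print("packaging available:", Version("0.26.1"))
```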
gh_patches_debug_29710
rasdani/github-patches
git_diff
keras-team__autokeras-166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Model evaluation on test set yields different results on the same model <!--- **If you are reporting a bug:** * Verify that your issue is not being currently addressed by other issues or pull requests. * Please note that Auto-Keras is only compatible with **Python 3.6**. * Tag the issue with the `bug report` tag. --> ### Bug Description Test set evaluation yields different results if re-run. ### Reproducing Steps Steps to reproduce the behavior: re-run multiple times: ```python y_pred = clf.evaluate(X_test, y_test) print(y_pred) ``` will yield different results: ``` 0.0992 0.1032 0.101 0.0989 ``` Further, using manual evaluation: ```python y_prediction = clf.predict(x_test=X_test) from sklearn.metrics import accuracy_score accuracy_score(y_pred=y_prediction, y_true=y_test) ``` leads to different results as well. It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time. ### Expected Behavior Final evaluation on a test set should not lead to different results using the same model on the same data. ### Setup Details Include the details about the versions of: - OS type and version: Linux - Python: 3.6.5 - autokeras: 0.2.11 - scikit-learn:0.19.1 - numpy:1.14.5 - keras: 2.2.2 - scipy:1.1.0 - tensorflow: 1.10.0 - pytorch:0.4.1 ### Additional context This error is verified on MNIST and Fashion-MNIST. </issue> <code> [start of autokeras/preprocessor.py] 1 import torch 2 3 import numpy as np 4 from torch.utils.data import Dataset, DataLoader 5 from torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose 6 7 from autokeras.constant import Constant 8 9 10 class OneHotEncoder: 11 """A class that can format data. 12 13 This class provides ways to transform data's classification label into vector. 14 15 Attributes: 16 data: The input data 17 n_classes: The number of classes in the classification problem. 18 labels: The number of labels. 19 label_to_vec: Mapping from label to vector. 20 int_to_label: Mapping from int to label. 
21 """ 22 23 def __init__(self): 24 """Initialize a OneHotEncoder""" 25 self.data = None 26 self.n_classes = 0 27 self.labels = None 28 self.label_to_vec = {} 29 self.int_to_label = {} 30 31 def fit(self, data): 32 """Create mapping from label to vector, and vector to label.""" 33 data = np.array(data).flatten() 34 self.labels = set(data) 35 self.n_classes = len(self.labels) 36 for index, label in enumerate(self.labels): 37 vec = np.array([0] * self.n_classes) 38 vec[index] = 1 39 self.label_to_vec[label] = vec 40 self.int_to_label[index] = label 41 42 def transform(self, data): 43 """Get vector for every element in the data array.""" 44 data = np.array(data) 45 if len(data.shape) > 1: 46 data = data.flatten() 47 return np.array(list(map(lambda x: self.label_to_vec[x], data))) 48 49 def inverse_transform(self, data): 50 """Get label for every element in data.""" 51 return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1)))) 52 53 54 class DataTransformer: 55 def __init__(self, data, augment=Constant.DATA_AUGMENTATION): 56 self.max_val = data.max() 57 data = data / self.max_val 58 self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten() 59 self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten() 60 self.augment = augment 61 62 def transform_train(self, data, targets=None, batch_size=None): 63 if not self.augment: 64 augment_list = [] 65 else: 66 augment_list = [ToPILImage(), 67 RandomCrop(data.shape[1:3], padding=4), 68 RandomHorizontalFlip(), 69 ToTensor() 70 ] 71 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))] 72 compose_list = augment_list + common_list 73 74 return self._transform(batch_size, compose_list, data, targets) 75 76 def transform_test(self, data, targets=None, batch_size=None): 77 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))] 78 compose_list = common_list 79 80 return self._transform(batch_size, compose_list, data, targets) 81 82 def _transform(self, batch_size, compose_list, data, targets): 83 if batch_size is None: 84 batch_size = Constant.MAX_BATCH_SIZE 85 batch_size = min(len(data), batch_size) 86 data = data / self.max_val 87 data = torch.Tensor(data.transpose(0, 3, 1, 2)) 88 data_transforms = Compose(compose_list) 89 dataset = MultiTransformDataset(data, targets, data_transforms) 90 return DataLoader(dataset, batch_size=batch_size, shuffle=True) 91 92 93 class MultiTransformDataset(Dataset): 94 def __init__(self, dataset, target, compose): 95 self.dataset = dataset 96 self.target = target 97 self.compose = compose 98 99 def __getitem__(self, index): 100 feature = self.dataset[index] 101 if self.target is None: 102 return self.compose(feature) 103 return self.compose(feature), self.target[index] 104 105 def __len__(self): 106 return len(self.dataset) 107 [end of autokeras/preprocessor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py --- a/autokeras/preprocessor.py +++ b/autokeras/preprocessor.py @@ -71,23 +71,31 @@ common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))] compose_list = augment_list + common_list - return self._transform(batch_size, compose_list, data, targets) + dataset = self._transform(compose_list, data, targets) + + if batch_size is None: + batch_size = Constant.MAX_BATCH_SIZE + batch_size = min(len(data), batch_size) + + return DataLoader(dataset, batch_size=batch_size, shuffle=True) def transform_test(self, data, targets=None, batch_size=None): common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))] compose_list = common_list - return self._transform(batch_size, compose_list, data, targets) + dataset = self._transform(compose_list, data, targets) - def _transform(self, batch_size, compose_list, data, targets): if batch_size is None: batch_size = Constant.MAX_BATCH_SIZE batch_size = min(len(data), batch_size) + + return DataLoader(dataset, batch_size=batch_size, shuffle=False) + + def _transform(self, compose_list, data, targets): data = data / self.max_val data = torch.Tensor(data.transpose(0, 3, 1, 2)) data_transforms = Compose(compose_list) - dataset = MultiTransformDataset(data, targets, data_transforms) - return DataLoader(dataset, batch_size=batch_size, shuffle=True) + return MultiTransformDataset(data, targets, data_transforms) class MultiTransformDataset(Dataset):
{"golden_diff": "diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py\n--- a/autokeras/preprocessor.py\n+++ b/autokeras/preprocessor.py\n@@ -71,23 +71,31 @@\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n+\n+ if batch_size is None:\n+ batch_size = Constant.MAX_BATCH_SIZE\n+ batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n \n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n \n- def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=False)\n+\n+ def _transform(self, compose_list, data, targets):\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 1, 2))\n data_transforms = Compose(compose_list)\n- dataset = MultiTransformDataset(data, targets, data_transforms)\n- return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n+ return MultiTransformDataset(data, targets, data_transforms)\n \n \n class MultiTransformDataset(Dataset):\n", "issue": "Model evaluation on test set yields different results on the same model\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\nTest set evaluation yields different results if re-run.\r\n\r\n### Reproducing Steps\r\nSteps to reproduce the behavior:\r\nre-run multiple times:\r\n```python\r\ny_pred = clf.evaluate(X_test, y_test)\r\nprint(y_pred)\r\n```\r\nwill yield different results:\r\n```\r\n0.0992\r\n0.1032\r\n0.101\r\n0.0989\r\n```\r\n\r\nFurther, using manual evaluation:\r\n```python\r\ny_prediction = clf.predict(x_test=X_test)\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_pred=y_prediction, y_true=y_test)\r\n```\r\nleads to different results as well. 
It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time.\r\n\r\n### Expected Behavior\r\nFinal evaluation on a test set should not lead to different results using the same model on the same data.\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Linux\r\n - Python: 3.6.5\r\n - autokeras: 0.2.11\r\n - scikit-learn:0.19.1\r\n - numpy:1.14.5\r\n - keras: 2.2.2\r\n - scipy:1.1.0\r\n - tensorflow: 1.10.0\r\n - pytorch:0.4.1\r\n\r\n### Additional context\r\nThis error is verified on MNIST and Fashion-MNIST.\n", "before_files": [{"content": "import torch\n\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose\n\nfrom autokeras.constant import Constant\n\n\nclass OneHotEncoder:\n \"\"\"A class that can format data.\n\n This class provides ways to transform data's classification label into vector.\n\n Attributes:\n data: The input data\n n_classes: The number of classes in the classification problem.\n labels: The number of labels.\n label_to_vec: Mapping from label to vector.\n int_to_label: Mapping from int to label.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a OneHotEncoder\"\"\"\n self.data = None\n self.n_classes = 0\n self.labels = None\n self.label_to_vec = {}\n self.int_to_label = {}\n\n def fit(self, data):\n \"\"\"Create mapping from label to vector, and vector to label.\"\"\"\n data = np.array(data).flatten()\n self.labels = set(data)\n self.n_classes = len(self.labels)\n for index, label in enumerate(self.labels):\n vec = np.array([0] * self.n_classes)\n vec[index] = 1\n self.label_to_vec[label] = vec\n self.int_to_label[index] = label\n\n def transform(self, data):\n \"\"\"Get vector for every element in the data array.\"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n data = data.flatten()\n return np.array(list(map(lambda x: self.label_to_vec[x], data)))\n\n def inverse_transform(self, data):\n \"\"\"Get label for every element in data.\"\"\"\n return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))\n\n\nclass DataTransformer:\n def __init__(self, data, augment=Constant.DATA_AUGMENTATION):\n self.max_val = data.max()\n data = data / self.max_val\n self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.augment = augment\n\n def transform_train(self, data, targets=None, batch_size=None):\n if not self.augment:\n augment_list = []\n else:\n augment_list = [ToPILImage(),\n RandomCrop(data.shape[1:3], padding=4),\n RandomHorizontalFlip(),\n ToTensor()\n ]\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 
1, 2))\n data_transforms = Compose(compose_list)\n dataset = MultiTransformDataset(data, targets, data_transforms)\n return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n\nclass MultiTransformDataset(Dataset):\n def __init__(self, dataset, target, compose):\n self.dataset = dataset\n self.target = target\n self.compose = compose\n\n def __getitem__(self, index):\n feature = self.dataset[index]\n if self.target is None:\n return self.compose(feature)\n return self.compose(feature), self.target[index]\n\n def __len__(self):\n return len(self.dataset)\n", "path": "autokeras/preprocessor.py"}]}
2,014
393
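The golden diff above splits the shared `_transform` helper so the test loader is built with `shuffle=False`. A runnable reduction of why that matters for reproducible evaluation:

```python
# Evaluation data must keep a stable order (shuffle=False), otherwise repeated
# runs pair predictions with differently ordered targets and the metric drifts
# between invocations, as described in the issue.
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.randn(8, 3)
targets = torch.arange(8)
dataset = TensorDataset(features, targets)

train_loader = DataLoader(dataset, batch_size=4, shuffle=True)   # fine for training
test_loader = DataLoader(dataset, batch_size=4, shuffle=False)   # required for evaluation

collected = torch.cat([batch_targets for _, batch_targets in test_loader])
assert torch.equal(collected, targets)  # holds only because shuffle=False
```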
gh_patches_debug_9194
rasdani/github-patches
git_diff
akvo__akvo-rsr-2129
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show results framework for private projects ## Test plan GIVEN the 'My results' tab in MyRSR WHEN searching for a private project THEN the project should appear in the list WHEN the private project is opened in 'My results' THEN the user should have full access to the results framework, just like a regular public project ## Issue description Currently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well. </issue> <code> [start of akvo/rest/views/typeahead.py] 1 # -*- coding: utf-8 -*- 2 3 """Akvo RSR is covered by the GNU Affero General Public License. 4 See more details in the license.txt file located at the root folder of the 5 Akvo RSR module. For additional details on the GNU license please 6 see < http://www.gnu.org/licenses/agpl.html >. 7 """ 8 9 from akvo.rest.serializers import (TypeaheadCountrySerializer, 10 TypeaheadOrganisationSerializer, 11 TypeaheadProjectSerializer, 12 TypeaheadProjectUpdateSerializer) 13 from akvo.rsr.models import Country, Organisation, Project, ProjectUpdate 14 15 from rest_framework.decorators import api_view 16 from rest_framework.response import Response 17 18 19 def rejig(queryset, serializer): 20 """Rearrange & add queryset count to the response data.""" 21 return { 22 'count': queryset.count(), 23 'results': serializer.data 24 } 25 26 27 @api_view(['GET']) 28 def typeahead_country(request): 29 countries = Country.objects.all() 30 return Response( 31 rejig(countries, TypeaheadCountrySerializer(countries, many=True)) 32 ) 33 34 35 @api_view(['GET']) 36 def typeahead_organisation(request): 37 organisations = Organisation.objects.all() 38 return Response( 39 rejig(organisations, TypeaheadOrganisationSerializer(organisations, 40 many=True)) 41 ) 42 43 44 @api_view(['GET']) 45 def typeahead_user_organisations(request): 46 user = request.user 47 is_admin = user.is_active and (user.is_superuser or user.is_admin) 48 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all() 49 return Response( 50 rejig(organisations, TypeaheadOrganisationSerializer(organisations, 51 many=True)) 52 ) 53 54 55 @api_view(['GET']) 56 def typeahead_project(request): 57 projects = Project.objects.all().exclude(title='') 58 return Response( 59 rejig(projects, TypeaheadProjectSerializer(projects, many=True)) 60 ) 61 62 63 @api_view(['GET']) 64 def typeahead_user_projects(request): 65 user = request.user 66 is_admin = user.is_active and (user.is_superuser or user.is_admin) 67 if is_admin: 68 projects = Project.objects.all() 69 else: 70 projects = user.approved_organisations().all_projects() 71 projects = projects.exclude(title='') 72 return Response( 73 rejig(projects, TypeaheadProjectSerializer(projects, many=True)) 74 ) 75 76 77 @api_view(['GET']) 78 def typeahead_impact_projects(request): 79 user = request.user 80 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects() 81 projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title') 82 83 return Response( 84 rejig(projects, TypeaheadProjectSerializer(projects, many=True)) 85 ) 86 87 88 @api_view(['GET']) 89 def typeahead_projectupdate(request): 90 updates = ProjectUpdate.objects.all() 91 return Response( 92 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True)) 93 ) 94 [end of 
akvo/rest/views/typeahead.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py --- a/akvo/rest/views/typeahead.py +++ b/akvo/rest/views/typeahead.py @@ -78,7 +78,7 @@ def typeahead_impact_projects(request): user = request.user projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects() - projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title') + projects = projects.published().filter(is_impact_project=True).order_by('title') return Response( rejig(projects, TypeaheadProjectSerializer(projects, many=True))
{"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -78,7 +78,7 @@\n def typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n- projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n+ projects = projects.published().filter(is_impact_project=True).order_by('title')\n \n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n", "issue": "Show results framework for private projects\n## Test plan\n\nGIVEN the 'My results' tab in MyRSR\nWHEN searching for a private project\nTHEN the project should appear in the list\n\nWHEN the private project is opened in 'My results'\nTHEN the user should have full access to the results framework, just like a regular public project\n## Issue description\n\nCurrently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\nfrom akvo.rsr.models import Country, Organisation, Project, ProjectUpdate\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n countries = Country.objects.all()\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = 
projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]}
1,457
154
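The record above removes the `is_public=True` filter so private projects surface in the results framework. A plain-Python sketch of the behavioural change (no Django required; the dict records are illustrative stand-ins for the queryset):

```python
# Dropping the is_public filter makes private impact projects visible to their
# authorised users while keeping the is_impact_project condition intact.
projects = [
    {"title": "Public project", "is_impact_project": True, "is_public": True},
    {"title": "Private project", "is_impact_project": True, "is_public": False},
]

def visible_before(items):
    return [p for p in items if p["is_impact_project"] and p["is_public"]]

def visible_after(items):
    return [p for p in items if p["is_impact_project"]]

assert [p["title"] for p in visible_before(projects)] == ["Public project"]
assert [p["title"] for p in visible_after(projects)] == ["Public project", "Private project"]
```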
gh_patches_debug_24995
rasdani/github-patches
git_diff
e-valuation__EvaP-1853
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Investigate Github Action caching One can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain. </issue> <code> [start of evap/evaluation/management/commands/scss.py] 1 import os 2 import subprocess # nosec 3 4 from django.conf import settings 5 from django.core.management.base import BaseCommand 6 7 8 class Command(BaseCommand): 9 def add_arguments(self, parser): 10 parser.add_argument( 11 "--watch", 12 action="store_true", 13 help="Watch stylesheets and recompile when they change.", 14 ) 15 parser.add_argument( 16 "--production", 17 action="store_true", 18 help="Compress output stylesheet and do not generate source maps." 19 " Intended to use in production deployment.", 20 ) 21 22 def handle(self, *args, **options): 23 static_directory = settings.STATICFILES_DIRS[0] 24 command = [ 25 "npx", 26 "sass", 27 os.path.join(static_directory, "scss", "evap.scss"), 28 os.path.join(static_directory, "css", "evap.css"), 29 ] 30 31 if options["watch"]: 32 command += ["--watch", "--poll"] 33 34 if options["production"]: 35 command += ["--style", "compressed", "--no-source-map"] 36 37 try: 38 subprocess.run(command, check=True) # nosec 39 except FileNotFoundError: 40 print("Could not find sass command", file=self.stderr) 41 except KeyboardInterrupt: 42 pass 43 [end of evap/evaluation/management/commands/scss.py] [start of evap/evaluation/management/commands/ts.py] 1 import argparse 2 import os 3 import subprocess # nosec 4 import unittest 5 6 from django.conf import settings 7 from django.core.management import call_command 8 from django.core.management.base import BaseCommand, CommandError 9 from django.test.runner import DiscoverRunner 10 11 12 class RenderPagesRunner(DiscoverRunner): 13 """Test runner which only includes `render_pages.*` methods. 14 The actual logic of the page rendering is implemented in the `@render_pages` decorator.""" 15 16 test_loader = unittest.TestLoader() 17 18 def __init__(self, **kwargs): 19 super().__init__(**kwargs) 20 self.test_loader.testMethodPrefix = "render_pages" 21 22 23 class Command(BaseCommand): 24 def add_arguments(self, parser: argparse.ArgumentParser): 25 subparsers = parser.add_subparsers(dest="command", required=True) 26 compile_parser = subparsers.add_parser("compile") 27 compile_parser.add_argument( 28 "--watch", 29 action="store_true", 30 help="Watch scripts and recompile when they change.", 31 ) 32 self.add_fresh_argument(compile_parser) 33 test_parser = subparsers.add_parser("test") 34 self.add_fresh_argument(test_parser) 35 subparsers.add_parser("render_pages") 36 37 @staticmethod 38 def add_fresh_argument(parser: argparse.ArgumentParser): 39 parser.add_argument( 40 "--fresh", 41 action="store_true", 42 help="Delete .tsbuildinfo.json before compilation to force a fresh compilation." 
43 "This is useful when incremental compilation does not yield the expected output.", 44 ) 45 46 def handle(self, *args, **options): 47 if options["command"] == "compile": 48 self.compile(**options) 49 elif options["command"] == "test": 50 self.test(**options) 51 elif options["command"] == "render_pages": 52 self.render_pages(**options) 53 54 def run_command(self, command): 55 try: 56 subprocess.run(command, check=True) # nosec 57 except FileNotFoundError: 58 print(f"Could not find {command[0]} command", file=self.stderr) 59 except KeyboardInterrupt: 60 pass 61 except subprocess.CalledProcessError as e: 62 raise CommandError("Error during command execution", returncode=e.returncode) from e 63 64 def compile(self, watch=False, fresh=False, **_options): 65 static_directory = settings.STATICFILES_DIRS[0] 66 command = [ 67 "npx", 68 "tsc", 69 "--project", 70 os.path.join(static_directory, "ts", "tsconfig.compile.json"), 71 ] 72 73 if watch: 74 command += ["--watch"] 75 76 if fresh: 77 try: 78 os.remove(os.path.join(static_directory, "ts", ".tsbuildinfo.json")) 79 except FileNotFoundError: 80 pass 81 82 self.run_command(command) 83 84 def test(self, **options): 85 call_command("scss") 86 self.compile(**options) 87 self.render_pages() 88 self.run_command(["npx", "jest"]) 89 90 @staticmethod 91 def render_pages(**_options): 92 # Enable debug mode as otherwise a collectstatic beforehand would be necessary, 93 # as missing static files would result into an error. 94 test_runner = RenderPagesRunner(debug_mode=True) 95 failed_tests = test_runner.run_tests([]) 96 if failed_tests > 0: 97 raise CommandError("Failures during render_pages") 98 [end of evap/evaluation/management/commands/ts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py --- a/evap/evaluation/management/commands/scss.py +++ b/evap/evaluation/management/commands/scss.py @@ -2,7 +2,7 @@ import subprocess # nosec from django.conf import settings -from django.core.management.base import BaseCommand +from django.core.management.base import BaseCommand, CommandError class Command(BaseCommand): @@ -36,7 +36,7 @@ try: subprocess.run(command, check=True) # nosec - except FileNotFoundError: - print("Could not find sass command", file=self.stderr) + except FileNotFoundError as e: + raise CommandError("Could not find sass command") from e except KeyboardInterrupt: pass diff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py --- a/evap/evaluation/management/commands/ts.py +++ b/evap/evaluation/management/commands/ts.py @@ -54,8 +54,8 @@ def run_command(self, command): try: subprocess.run(command, check=True) # nosec - except FileNotFoundError: - print(f"Could not find {command[0]} command", file=self.stderr) + except FileNotFoundError as e: + raise CommandError(f"Could not find {command[0]} command") from e except KeyboardInterrupt: pass except subprocess.CalledProcessError as e:
{"golden_diff": "diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py\n--- a/evap/evaluation/management/commands/scss.py\n+++ b/evap/evaluation/management/commands/scss.py\n@@ -2,7 +2,7 @@\n import subprocess # nosec\n \n from django.conf import settings\n-from django.core.management.base import BaseCommand\n+from django.core.management.base import BaseCommand, CommandError\n \n \n class Command(BaseCommand):\n@@ -36,7 +36,7 @@\n \n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(\"Could not find sass command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(\"Could not find sass command\") from e\n except KeyboardInterrupt:\n pass\ndiff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py\n--- a/evap/evaluation/management/commands/ts.py\n+++ b/evap/evaluation/management/commands/ts.py\n@@ -54,8 +54,8 @@\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(f\"Could not find {command[0]} command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(f\"Could not find {command[0]} command\") from e\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n", "issue": "Investigate Github Action caching\nOne can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain.\n", "before_files": [{"content": "import os\nimport subprocess # nosec\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch stylesheets and recompile when they change.\",\n )\n parser.add_argument(\n \"--production\",\n action=\"store_true\",\n help=\"Compress output stylesheet and do not generate source maps.\"\n \" Intended to use in production deployment.\",\n )\n\n def handle(self, *args, **options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"sass\",\n os.path.join(static_directory, \"scss\", \"evap.scss\"),\n os.path.join(static_directory, \"css\", \"evap.css\"),\n ]\n\n if options[\"watch\"]:\n command += [\"--watch\", \"--poll\"]\n\n if options[\"production\"]:\n command += [\"--style\", \"compressed\", \"--no-source-map\"]\n\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(\"Could not find sass command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n", "path": "evap/evaluation/management/commands/scss.py"}, {"content": "import argparse\nimport os\nimport subprocess # nosec\nimport unittest\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.test.runner import DiscoverRunner\n\n\nclass RenderPagesRunner(DiscoverRunner):\n \"\"\"Test runner which only includes `render_pages.*` methods.\n The actual logic of the page rendering is implemented in the `@render_pages` decorator.\"\"\"\n\n test_loader = unittest.TestLoader()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.test_loader.testMethodPrefix = \"render_pages\"\n\n\nclass Command(BaseCommand):\n def 
add_arguments(self, parser: argparse.ArgumentParser):\n subparsers = parser.add_subparsers(dest=\"command\", required=True)\n compile_parser = subparsers.add_parser(\"compile\")\n compile_parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch scripts and recompile when they change.\",\n )\n self.add_fresh_argument(compile_parser)\n test_parser = subparsers.add_parser(\"test\")\n self.add_fresh_argument(test_parser)\n subparsers.add_parser(\"render_pages\")\n\n @staticmethod\n def add_fresh_argument(parser: argparse.ArgumentParser):\n parser.add_argument(\n \"--fresh\",\n action=\"store_true\",\n help=\"Delete .tsbuildinfo.json before compilation to force a fresh compilation.\"\n \"This is useful when incremental compilation does not yield the expected output.\",\n )\n\n def handle(self, *args, **options):\n if options[\"command\"] == \"compile\":\n self.compile(**options)\n elif options[\"command\"] == \"test\":\n self.test(**options)\n elif options[\"command\"] == \"render_pages\":\n self.render_pages(**options)\n\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(f\"Could not find {command[0]} command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n raise CommandError(\"Error during command execution\", returncode=e.returncode) from e\n\n def compile(self, watch=False, fresh=False, **_options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"tsc\",\n \"--project\",\n os.path.join(static_directory, \"ts\", \"tsconfig.compile.json\"),\n ]\n\n if watch:\n command += [\"--watch\"]\n\n if fresh:\n try:\n os.remove(os.path.join(static_directory, \"ts\", \".tsbuildinfo.json\"))\n except FileNotFoundError:\n pass\n\n self.run_command(command)\n\n def test(self, **options):\n call_command(\"scss\")\n self.compile(**options)\n self.render_pages()\n self.run_command([\"npx\", \"jest\"])\n\n @staticmethod\n def render_pages(**_options):\n # Enable debug mode as otherwise a collectstatic beforehand would be necessary,\n # as missing static files would result into an error.\n test_runner = RenderPagesRunner(debug_mode=True)\n failed_tests = test_runner.run_tests([])\n if failed_tests > 0:\n raise CommandError(\"Failures during render_pages\")\n", "path": "evap/evaluation/management/commands/ts.py"}]}
1,850
349
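The golden diff above replaces `print(...)` with `raise CommandError(...)` so a missing binary fails the management command with a non-zero exit status, which is what CI caching workflows need to detect breakage. A self-contained sketch; `CommandError` is stubbed so the snippet runs without Django:

```python
# Raising instead of printing propagates the failure to the command runner.
import subprocess

class CommandError(Exception):
    """Stand-in for django.core.management.base.CommandError."""

def run_command(command):
    try:
        subprocess.run(command, check=True)
    except FileNotFoundError as exc:
        raise CommandError(f"Could not find {command[0]} command") from exc

try:
    run_command(["surely-not-an-installed-binary"])
except CommandError as exc:
    print("caught:", exc)
```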
gh_patches_debug_34206
rasdani/github-patches
git_diff
kymatio__kymatio-244
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RELEASE requirements? Hi, When doing: ``` conda install pytorch torchvision -c pytorch pip install -i https://test.pypi.org/simple/ kymatio ``` then cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidences that none of the requirements.txt packages are installed. Actually, this makes me think we should have a pip for the GPU version(like `pip install kymatio-gpu`)? https://github.com/kymatio/kymatio/blob/master/requirements.txt </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import csv 3 import importlib 4 import os 5 import shutil 6 import sys 7 from setuptools import setup, find_packages 8 9 # Constants 10 DISTNAME = 'kymatio' 11 DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration' 12 URL = 'https://kymatio.github.io' 13 LICENSE = 'BSD-3-Clause' 14 15 16 # Parse description 17 with open('README.md') as f: 18 README = f.read().split('\n') 19 LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[![']) 20 21 22 # Parse version.py 23 kymatio_version_spec = importlib.util.spec_from_file_location( 24 'kymatio_version', 'kymatio/version.py') 25 kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec) 26 kymatio_version_spec.loader.exec_module(kymatio_version_module) 27 VERSION = kymatio_version_module.version 28 29 30 # Parse requirements.txt 31 with open('requirements.txt', 'r') as f: 32 REQUIREMENTS = f.read().split('\n') 33 34 35 setup_info = dict( 36 # Metadata 37 name=DISTNAME, 38 version=VERSION, 39 author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, ' 40 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, ' 41 'Louis Thiry, Vincent Lostanlen, Joakim Andén, ' 42 'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'), 43 author_email=('[email protected], [email protected], ' 44 '[email protected], [email protected], ' 45 '[email protected], [email protected], ' 46 '[email protected], [email protected], [email protected], ' 47 '[email protected], [email protected], [email protected]'), 48 url=URL, 49 download_url='https://github.com/kymatio/kymatio/releases', 50 classifiers=['Intended Audience :: Education', 51 'Intended Audience :: Science/Research', 52 'License :: OSI Approved :: BSD License', 53 'Natural Language :: English', 54 'Operating System :: MacOS', 55 'Operating System :: Microsoft :: Windows', 56 'Operating System :: POSIX :: Linux', 57 'Programming Language :: Python :: 3.4', 58 'Programming Language :: Python :: 3.5', 59 'Programming Language :: Python :: 3.6', 60 'Programming Language :: Python :: 3.7', 61 'Programming Language :: Python :: 3.8', 62 'Topic :: Multimedia :: Graphics :: 3D Modeling', 63 'Topic :: Multimedia :: Sound/Audio :: Analysis', 64 'Topic :: Scientific/Engineering :: Artificial Intelligence', 65 'Topic :: Scientific/Engineering :: Chemistry', 66 'Topic :: Scientific/Engineering :: Image Recognition', 67 'Topic :: Scientific/Engineering :: Information Analysis', 68 'Topic :: Scientific/Engineering :: Mathematics', 69 'Topic :: Scientific/Engineering :: Physics', 70 'Topic :: Software Development :: Libraries :: Python Modules', 71 ], 72 description=DESCRIPTION, 73 long_description=LONG_DESCRIPTION, 74 long_description_content_type='text/markdown', 75 license=LICENSE, 76 packages=find_packages(exclude=('test',)), 77 install_requires=REQUIREMENTS, 78 zip_safe=True, 79 ) 80 81 setup(**setup_info) 82 [end of setup.py] </code> I 
need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Constants DISTNAME = 'kymatio' DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration' -URL = 'https://kymatio.github.io' +URL = 'https://www.kymat.io' LICENSE = 'BSD-3-Clause' @@ -47,15 +47,18 @@ '[email protected], [email protected], [email protected]'), url=URL, download_url='https://github.com/kymatio/kymatio/releases', + project_urls={ + 'Documentation': 'https://www.kymat.io/codereference.html', + 'Source': 'https://github.com/kymatio/kymatio/', + 'Tracker': 'https://github.com/kymatio/kymatio/issues', + 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md' + }, classifiers=['Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: MacOS', - 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', @@ -72,6 +75,7 @@ description=DESCRIPTION, long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', + python_requires='>=3.6', license=LICENSE, packages=find_packages(exclude=('test',)), install_requires=REQUIREMENTS,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n # Constants\n DISTNAME = 'kymatio'\n DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\n-URL = 'https://kymatio.github.io'\n+URL = 'https://www.kymat.io'\n LICENSE = 'BSD-3-Clause'\n \n \n@@ -47,15 +47,18 @@\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n+ project_urls={\n+ 'Documentation': 'https://www.kymat.io/codereference.html',\n+ 'Source': 'https://github.com/kymatio/kymatio/',\n+ 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n+ 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n+ },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n- 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n- 'Programming Language :: Python :: 3.4',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n@@ -72,6 +75,7 @@\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n+ python_requires='>=3.6',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n", "issue": "RELEASE requirements?\nHi,\r\n\r\nWhen doing:\r\n\r\n```\r\nconda install pytorch torchvision -c pytorch\r\npip install -i https://test.pypi.org/simple/ kymatio\r\n```\r\n\r\nthen cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidences that none of the requirements.txt packages are installed. 
Actually, this makes me think we should have a pip for the GPU version(like `pip install kymatio-gpu`)?\r\n\r\nhttps://github.com/kymatio/kymatio/blob/master/requirements.txt\n", "before_files": [{"content": "#!/usr/bin/env python\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://kymatio.github.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}]}
1,557
428
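The record above tightens package metadata: `python_requires` gates unsupported interpreters at pip-resolution time and `project_urls` adds sidebar links on PyPI. A hypothetical `setup()` showing just that metadata; the package name and URLs are placeholders, not kymatio's:

```python
# python_requires makes pip refuse installation on interpreters outside the
# declared range; project_urls surfaces docs/source/tracker links on PyPI.
from setuptools import setup, find_packages

setup(
    name="examplepkg",
    version="0.1.0",
    packages=find_packages(exclude=("test",)),
    python_requires=">=3.6",
    project_urls={
        "Documentation": "https://example.org/docs",
        "Source": "https://example.org/src",
        "Tracker": "https://example.org/issues",
    },
)
```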
gh_patches_debug_27447
rasdani/github-patches
git_diff
bridgecrewio__checkov-1310
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AZURE_103 not accepting vsts_configuration as valid Git repository **Describe the bug** The rule CKV_AZURE_103 only accepts the Github configuration for Git source control for an Azure Data Factory instance. However, it is possible to configure a Git source control using the `vsts_configuration`. **To Reproduce** Steps to reproduce the behavior: Create the following resource ```hcl resource "azurerm_data_factory" "main" { # General name = "dummy-name" resource_group_name = azurerm_resource_group.primary.name location = azurerm_resource_group.primary.location # Azure DevOps vsts_configuration { account_name = var.account_name branch_name = var.branch_name project_name = var.project_name repository_name = var.repository_name root_folder = var.root_folder tenant_id = data.azurerm_client_config.current.tenant_id } } ``` **Expected behavior** Expected to accept both vsts_configuration and github_configuration as valid Git source control configurations. **Desktop (please complete the following information):** - OS: Ubuntu 20.04 LTS - Checkov Version 2.0.86 </issue> <code> [start of checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py] 1 from checkov.common.models.consts import ANY_VALUE 2 from checkov.common.models.enums import CheckCategories 3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 4 5 6 class DataFactoryUsesGitRepository(BaseResourceValueCheck): 7 def __init__(self): 8 name = "Ensure that Azure Data Factory uses Git repository for source control" 9 id = "CKV_AZURE_103" 10 supported_resources = ['azurerm_data_factory'] 11 categories = [CheckCategories.GENERAL_SECURITY] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 13 14 def get_inspected_key(self): 15 return "github_configuration/[0]/repository_name" 16 17 def get_expected_value(self): 18 return ANY_VALUE 19 20 21 check = DataFactoryUsesGitRepository() 22 [end of checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py --- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py +++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py @@ -1,21 +1,25 @@ -from checkov.common.models.consts import ANY_VALUE -from checkov.common.models.enums import CheckCategories -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck +from typing import Dict, List, Any +from checkov.common.models.enums import CheckCategories, CheckResult +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck -class DataFactoryUsesGitRepository(BaseResourceValueCheck): - def __init__(self): + +class DataFactoryUsesGitRepository(BaseResourceCheck): + def __init__(self) -> None: name = "Ensure that Azure Data Factory uses Git repository for source control" id = "CKV_AZURE_103" - supported_resources = ['azurerm_data_factory'] + supported_resources = ["azurerm_data_factory"] categories = [CheckCategories.GENERAL_SECURITY] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def get_inspected_key(self): - return "github_configuration/[0]/repository_name" - - def get_expected_value(self): - return ANY_VALUE + def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult: + github = conf.get("github_configuration", [{}])[0] + if github.get("repository_name"): + return CheckResult.PASSED + vsts = conf.get("vsts_configuration", [{}])[0] + if vsts.get("repository_name"): + return CheckResult.PASSED + return CheckResult.FAILED check = DataFactoryUsesGitRepository()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n--- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n+++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n@@ -1,21 +1,25 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from typing import Dict, List, Any\n \n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n-class DataFactoryUsesGitRepository(BaseResourceValueCheck):\n- def __init__(self):\n+\n+class DataFactoryUsesGitRepository(BaseResourceCheck):\n+ def __init__(self) -> None:\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n- supported_resources = ['azurerm_data_factory']\n+ supported_resources = [\"azurerm_data_factory\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return \"github_configuration/[0]/repository_name\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ github = conf.get(\"github_configuration\", [{}])[0]\n+ if github.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ vsts = conf.get(\"vsts_configuration\", [{}])[0]\n+ if vsts.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n \n \n check = DataFactoryUsesGitRepository()\n", "issue": "CKV_AZURE_103 not accepting vsts_configuration as valid Git repository\n**Describe the bug**\r\nThe rule CKV_AZURE_103 only accepts the Github configuration for Git source control for an Azure Data Factory instance. However, it is possible to configure a Git source control using the `vsts_configuration`. 
\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nCreate the following resource\r\n```hcl\r\nresource \"azurerm_data_factory\" \"main\" {\r\n\r\n # General\r\n name = \"dummy-name\"\r\n resource_group_name = azurerm_resource_group.primary.name\r\n location = azurerm_resource_group.primary.location\r\n\r\n # Azure DevOps\r\n vsts_configuration {\r\n account_name = var.account_name\r\n branch_name = var.branch_name\r\n project_name = var.project_name\r\n repository_name = var.repository_name\r\n root_folder = var.root_folder\r\n tenant_id = data.azurerm_client_config.current.tenant_id\r\n }\r\n\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nExpected to accept both vsts_configuration and github_configuration as valid Git source control configurations.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04 LTS\r\n - Checkov Version 2.0.86\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DataFactoryUsesGitRepository(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n supported_resources = ['azurerm_data_factory']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"github_configuration/[0]/repository_name\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = DataFactoryUsesGitRepository()\n", "path": "checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py"}]}
1,037
437
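The checkov fix recorded above drops the declarative `BaseResourceValueCheck`, which can only inspect a single key path, in favor of a hand-written `scan_resource_conf`, so that either a `github_configuration` or a `vsts_configuration` block satisfies CKV_AZURE_103. A minimal, framework-free sketch of that decision logic follows; the string results stand in for checkov's `CheckResult` enum, and the sample configs are invented:

```python
from typing import Any, Dict, List

def uses_git_repository(conf: Dict[str, List[Any]]) -> str:
    """Pass if either Git source-control block names a repository.

    Terraform attributes arrive wrapped in lists; a missing block is
    modelled here as an absent key, mirroring the patched check.
    """
    github = conf.get("github_configuration", [{}])[0]
    if github.get("repository_name"):
        return "PASSED"
    vsts = conf.get("vsts_configuration", [{}])[0]
    if vsts.get("repository_name"):
        return "PASSED"
    return "FAILED"

assert uses_git_repository({"vsts_configuration": [{"repository_name": ["repo"]}]}) == "PASSED"
assert uses_git_repository({"location": ["westeurope"]}) == "FAILED"
```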
gh_patches_debug_37684
rasdani/github-patches
git_diff
pulp__pulpcore-4335
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need a way to directly migrate pulp_labels from 3.21 to 3.28 **Is your feature request related to a problem? Please describe.** As RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where `pulpcore-manager datarepair-labels` command is available. **Describe the solution you'd like** The ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice. **Describe alternatives you've considered** We have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0. </issue> <code> [start of pulpcore/app/migrations/0104_delete_label.py] 1 # Generated by Django 3.2.18 on 2023-05-04 07:06 2 3 from django.db import migrations 4 5 6 def check_no_existing_labels(apps, schema_editor): 7 Label = apps.get_model("core", "Label") 8 if Label.objects.exists(): 9 raise RuntimeError( 10 "There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted." 11 ) 12 13 14 class Migration(migrations.Migration): 15 16 dependencies = [ 17 ('core', '0103_alter_export_task'), 18 ] 19 20 operations = [ 21 migrations.RunPython( 22 code=check_no_existing_labels, 23 reverse_code=migrations.RunPython.noop, 24 elidable=True, 25 ), 26 migrations.DeleteModel( 27 name='Label', 28 ), 29 ] 30 [end of pulpcore/app/migrations/0104_delete_label.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py --- a/pulpcore/app/migrations/0104_delete_label.py +++ b/pulpcore/app/migrations/0104_delete_label.py @@ -1,6 +1,45 @@ # Generated by Django 3.2.18 on 2023-05-04 07:06 from django.db import migrations +from django.db.models.expressions import OuterRef, RawSQL + + +def migrate_remaining_labels(apps, schema_editor): + """ + This data migration handles the "but what about plugins" problem noted in the issue [0], with only two caveats: + + Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) "Repository", + that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then, + upon running this migration, those Labels would be lost. + + Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository, + but named something like "PluginRepositoryButWhy", and that plugin allowed Labels, + and instances of such a Model had Labels associated with them - then this migration would fail, + because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped. + + And the plugins described above would have to have existed and been in use with pulpcore/3.21,only - + if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well. + + No such plugins/Models exist, to the best of our knowledge. + + [0] https://github.com/pulp/pulpcore/issues/4319 + """ + Label = apps.get_model("core", "Label") + Repository = apps.get_model("core", "Repository") + Remote = apps.get_model("core", "Remote") + Distribution = apps.get_model("core", "Distribution") + ContentType = apps.get_model("contenttypes", "ContentType") + + for master_model, model_name in [(Repository, "repository"), (Remote, "remote"), (Distribution, "distribution")]: + detail_ctypes = ContentType.objects.filter(app_label__ne="core", model__endswith=model_name) + affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values("object_id").distinct() + label_subq = Label.objects.filter( + content_type__in=detail_ctypes, object_id=OuterRef("pulp_id") + ).annotate( + label_data=RawSQL("hstore(array_agg(key), array_agg(value))", []) + ).values("label_data") + master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq) + Label.objects.filter(content_type__in=detail_ctypes).delete() def check_no_existing_labels(apps, schema_editor): @@ -18,11 +57,20 @@ ] operations = [ + migrations.RunPython( + code=migrate_remaining_labels, + reverse_code=migrations.RunPython.noop, + elidable=True, + ), migrations.RunPython( code=check_no_existing_labels, reverse_code=migrations.RunPython.noop, elidable=True, ), + migrations.RunSQL( + sql="SET CONSTRAINTS ALL IMMEDIATE;", + reverse_sql="", + ), migrations.DeleteModel( name='Label', ),
{"golden_diff": "diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py\n--- a/pulpcore/app/migrations/0104_delete_label.py\n+++ b/pulpcore/app/migrations/0104_delete_label.py\n@@ -1,6 +1,45 @@\n # Generated by Django 3.2.18 on 2023-05-04 07:06\n \n from django.db import migrations\n+from django.db.models.expressions import OuterRef, RawSQL\n+\n+\n+def migrate_remaining_labels(apps, schema_editor):\n+ \"\"\"\n+ This data migration handles the \"but what about plugins\" problem noted in the issue [0], with only two caveats:\n+\n+ Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) \"Repository\",\n+ that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,\n+ upon running this migration, those Labels would be lost.\n+\n+ Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,\n+ but named something like \"PluginRepositoryButWhy\", and that plugin allowed Labels,\n+ and instances of such a Model had Labels associated with them - then this migration would fail,\n+ because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.\n+\n+ And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -\n+ if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.\n+\n+ No such plugins/Models exist, to the best of our knowledge.\n+\n+ [0] https://github.com/pulp/pulpcore/issues/4319\n+ \"\"\"\n+ Label = apps.get_model(\"core\", \"Label\")\n+ Repository = apps.get_model(\"core\", \"Repository\")\n+ Remote = apps.get_model(\"core\", \"Remote\")\n+ Distribution = apps.get_model(\"core\", \"Distribution\")\n+ ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n+\n+ for master_model, model_name in [(Repository, \"repository\"), (Remote, \"remote\"), (Distribution, \"distribution\")]:\n+ detail_ctypes = ContentType.objects.filter(app_label__ne=\"core\", model__endswith=model_name)\n+ affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values(\"object_id\").distinct()\n+ label_subq = Label.objects.filter(\n+ content_type__in=detail_ctypes, object_id=OuterRef(\"pulp_id\")\n+ ).annotate(\n+ label_data=RawSQL(\"hstore(array_agg(key), array_agg(value))\", [])\n+ ).values(\"label_data\")\n+ master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)\n+ Label.objects.filter(content_type__in=detail_ctypes).delete()\n \n \n def check_no_existing_labels(apps, schema_editor):\n@@ -18,11 +57,20 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ code=migrate_remaining_labels,\n+ reverse_code=migrations.RunPython.noop,\n+ elidable=True,\n+ ),\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n+ migrations.RunSQL(\n+ sql=\"SET CONSTRAINTS ALL IMMEDIATE;\",\n+ reverse_sql=\"\",\n+ ),\n migrations.DeleteModel(\n name='Label',\n ),\n", "issue": "Need a way to directly migrate pulp_labels from 3.21 to 3.28\n**Is your feature request related to a problem? Please describe.**\r\nAs RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. 
As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where `pulpcore-manager datarepair-labels` command is available.\r\n\r\n**Describe the solution you'd like**\r\nThe ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice.\r\n\r\n**Describe alternatives you've considered**\r\nWe have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0.\r\n\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.18 on 2023-05-04 07:06\n\nfrom django.db import migrations\n\n\ndef check_no_existing_labels(apps, schema_editor):\n Label = apps.get_model(\"core\", \"Label\")\n if Label.objects.exists():\n raise RuntimeError(\n \"There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted.\"\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0103_alter_export_task'),\n ]\n\n operations = [\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n migrations.DeleteModel(\n name='Label',\n ),\n ]\n", "path": "pulpcore/app/migrations/0104_delete_label.py"}]}
1,070
815
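The load-bearing piece of the pulpcore migration above is the correlated subquery that collapses each object's old `Label` rows into one Postgres `hstore` value via `hstore(array_agg(key), array_agg(value))` and writes it to the new `pulp_labels` field. Stripped of the ORM, the aggregation is just a group-by into per-object dictionaries. A sketch with invented rows:

```python
from collections import defaultdict

# Hypothetical rows from the pre-3.22 Label table: one row per
# (object, key, value) triple, keyed by the labelled object's id.
label_rows = [
    {"object_id": "repo-a", "key": "env", "value": "prod"},
    {"object_id": "repo-a", "key": "team", "value": "rhui"},
    {"object_id": "repo-b", "key": "env", "value": "dev"},
]

# Equivalent of hstore(array_agg(key), array_agg(value)) per object:
pulp_labels = defaultdict(dict)
for row in label_rows:
    pulp_labels[row["object_id"]][row["key"]] = row["value"]

assert pulp_labels["repo-a"] == {"env": "prod", "team": "rhui"}
assert pulp_labels["repo-b"] == {"env": "dev"}
```

Doing the fold in SQL keeps the migration to a single UPDATE per master model instead of a Python loop over every labelled object.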
gh_patches_debug_11491
rasdani/github-patches
git_diff
scikit-hep__pyhf-444
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pyhf commandline tools requires uproot extra # Description just issuing `pyhf --help` requires the uproot extra since `commandline.py` imports `writexml`. # Expected Behavior I don't need uproot if I don't want to use json2xml or xml2json # Actual Behavior I can't use `pyhf` without installing uproot. # Steps to Reproduce install master and run pyhf # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue </issue> <code> [start of pyhf/commandline.py] 1 import logging 2 3 import click 4 import json 5 import os 6 7 from . import writexml 8 from .utils import hypotest 9 from .pdf import Workspace 10 from .version import __version__ 11 12 logging.basicConfig() 13 log = logging.getLogger(__name__) 14 15 # This is only needed for Python 2/3 compatibility 16 def ensure_dirs(path): 17 try: 18 os.makedirs(path, exist_ok=True) 19 except TypeError: 20 if not os.path.exists(path): 21 os.makedirs(path) 22 23 24 @click.group(context_settings=dict(help_option_names=['-h', '--help'])) 25 @click.version_option(version=__version__) 26 def pyhf(): 27 pass 28 29 30 @pyhf.command() 31 @click.argument('entrypoint-xml', type=click.Path(exists=True)) 32 @click.option( 33 '--basedir', 34 help='The base directory for the XML files to point relative to.', 35 type=click.Path(exists=True), 36 default=os.getcwd(), 37 ) 38 @click.option( 39 '--output-file', 40 help='The location of the output json file. If not specified, prints to screen.', 41 default=None, 42 ) 43 @click.option('--track-progress/--hide-progress', default=True) 44 def xml2json(entrypoint_xml, basedir, output_file, track_progress): 45 """ Entrypoint XML: The top-level XML file for the PDF definition. """ 46 try: 47 import uproot 48 49 assert uproot 50 except ImportError: 51 log.error( 52 "xml2json requires uproot, please install pyhf using the " 53 "xmlio extra: pip install pyhf[xmlio] or install uproot " 54 "manually: pip install uproot" 55 ) 56 from . 
import readxml 57 58 spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress) 59 if output_file is None: 60 print(json.dumps(spec, indent=4, sort_keys=True)) 61 else: 62 with open(output_file, 'w+') as out_file: 63 json.dump(spec, out_file, indent=4, sort_keys=True) 64 log.debug("Written to {0:s}".format(output_file)) 65 66 67 @pyhf.command() 68 @click.argument('workspace', default='-') 69 @click.option('--output-dir', type=click.Path(exists=True), default='.') 70 @click.option('--specroot', default='config') 71 @click.option('--dataroot', default='data') 72 @click.option('--resultprefix', default='FitConfig') 73 def json2xml(workspace, output_dir, specroot, dataroot, resultprefix): 74 try: 75 import uproot 76 77 assert uproot 78 except ImportError: 79 log.error( 80 "json2xml requires uproot, please install pyhf using the " 81 "xmlio extra: pip install pyhf[xmlio] or install uproot " 82 "manually: pip install uproot" 83 ) 84 85 ensure_dirs(output_dir) 86 with click.open_file(workspace, 'r') as specstream: 87 d = json.load(specstream) 88 ensure_dirs(os.path.join(output_dir, specroot)) 89 ensure_dirs(os.path.join(output_dir, dataroot)) 90 with click.open_file( 91 os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w' 92 ) as outstream: 93 outstream.write( 94 writexml.writexml( 95 d, 96 os.path.join(output_dir, specroot), 97 os.path.join(output_dir, dataroot), 98 resultprefix, 99 ).decode('utf-8') 100 ) 101 102 103 @pyhf.command() 104 @click.argument('workspace', default='-') 105 @click.option( 106 '--output-file', 107 help='The location of the output json file. If not specified, prints to screen.', 108 default=None, 109 ) 110 @click.option('--measurement', default=None) 111 @click.option('-p', '--patch', multiple=True) 112 @click.option('--testpoi', default=1.0) 113 def cls(workspace, output_file, measurement, patch, testpoi): 114 with click.open_file(workspace, 'r') as specstream: 115 wspec = json.load(specstream) 116 117 w = Workspace(wspec) 118 119 patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch] 120 p = w.model(measurement_name=measurement, patches=patches) 121 result = hypotest(testpoi, w.data(p), p, return_expected_set=True) 122 result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()} 123 if output_file is None: 124 print(json.dumps(result, indent=4, sort_keys=True)) 125 else: 126 with open(output_file, 'w+') as out_file: 127 json.dump(result, out_file, indent=4, sort_keys=True) 128 log.debug("Written to {0:s}".format(output_file)) 129 [end of pyhf/commandline.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyhf/commandline.py b/pyhf/commandline.py --- a/pyhf/commandline.py +++ b/pyhf/commandline.py @@ -4,7 +4,6 @@ import json import os -from . import writexml from .utils import hypotest from .pdf import Workspace from .version import __version__ @@ -81,6 +80,7 @@ "xmlio extra: pip install pyhf[xmlio] or install uproot " "manually: pip install uproot" ) + from . import writexml ensure_dirs(output_dir) with click.open_file(workspace, 'r') as specstream:
{"golden_diff": "diff --git a/pyhf/commandline.py b/pyhf/commandline.py\n--- a/pyhf/commandline.py\n+++ b/pyhf/commandline.py\n@@ -4,7 +4,6 @@\n import json\n import os\n \n-from . import writexml\n from .utils import hypotest\n from .pdf import Workspace\n from .version import __version__\n@@ -81,6 +80,7 @@\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n+ from . import writexml\n \n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n", "issue": "pyhf commandline tools requires uproot extra\n# Description\r\n\r\njust issuing `pyhf --help` requires the uproot extra since `commandline.py` imports `writexml`.\r\n\r\n# Expected Behavior\r\n\r\nI don't need uproot if I don't want to use json2xml or xml2json\r\n\r\n# Actual Behavior\r\n\r\nI can't use `pyhf` without installing uproot.\r\n\r\n# Steps to Reproduce\r\n\r\ninstall master and run pyhf\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "import logging\n\nimport click\nimport json\nimport os\n\nfrom . import writexml\nfrom .utils import hypotest\nfrom .pdf import Workspace\nfrom .version import __version__\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n# This is only needed for Python 2/3 compatibility\ndef ensure_dirs(path):\n try:\n os.makedirs(path, exist_ok=True)\n except TypeError:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\ndef pyhf():\n pass\n\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected](\n '--basedir',\n help='The base directory for the XML files to point relative to.',\n type=click.Path(exists=True),\n default=os.getcwd(),\n)\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"xml2json requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n from . 
import readxml\n\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-dir', type=click.Path(exists=True), default='.')\[email protected]('--specroot', default='config')\[email protected]('--dataroot', default='data')\[email protected]('--resultprefix', default='FitConfig')\ndef json2xml(workspace, output_dir, specroot, dataroot, resultprefix):\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"json2xml requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n\n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n ensure_dirs(os.path.join(output_dir, specroot))\n ensure_dirs(os.path.join(output_dir, dataroot))\n with click.open_file(\n os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'\n ) as outstream:\n outstream.write(\n writexml.writexml(\n d,\n os.path.join(output_dir, specroot),\n os.path.join(output_dir, dataroot),\n resultprefix,\n ).decode('utf-8')\n )\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\ndef cls(workspace, output_file, measurement, patch, testpoi):\n with click.open_file(workspace, 'r') as specstream:\n wspec = json.load(specstream)\n\n w = Workspace(wspec)\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n p = w.model(measurement_name=measurement, patches=patches)\n result = hypotest(testpoi, w.data(p), p, return_expected_set=True)\n result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "pyhf/commandline.py"}]}
2,011
149
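The pyhf patch above is the classic deferred-import fix: the module-level `from . import writexml`, which transitively imports uproot, moves inside the one subcommand that needs it, so `pyhf --help` starts without the optional extra installed. The pattern in isolation, with `uproot` standing in for any heavy optional dependency:

```python
import logging

log = logging.getLogger(__name__)

def json2xml(*args, **kwargs):
    # Import only when this command actually runs, not at CLI startup.
    try:
        import uproot  # noqa: F401  (optional "xmlio" extra)
    except ImportError:
        log.error("json2xml requires uproot; pip install 'pyhf[xmlio]'")
        return 1
    # Safe now: this module is what pulls in uproot at import time.
    # from . import writexml   # deferred, exactly as in the diff above
    return 0
```

Note that in the patched code the relative import still raises if uproot is genuinely absent; the try/except's job is only to log an actionable message first.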
gh_patches_debug_1273
rasdani/github-patches
git_diff
ivy-llc__ivy-17162
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> is_integer </issue> <code> [start of ivy/functional/frontends/paddle/tensor/attribute.py] 1 # global 2 import ivy 3 from ivy.functional.frontends.paddle.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 ) 6 7 8 @to_ivy_arrays_and_back 9 def is_complex(x): 10 return ivy.is_complex_dtype(x) 11 12 13 @to_ivy_arrays_and_back 14 def is_floating_point(x): 15 return ivy.is_float_dtype(x) 16 [end of ivy/functional/frontends/paddle/tensor/attribute.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py --- a/ivy/functional/frontends/paddle/tensor/attribute.py +++ b/ivy/functional/frontends/paddle/tensor/attribute.py @@ -10,6 +10,11 @@ return ivy.is_complex_dtype(x) +@to_ivy_arrays_and_back +def is_integer(x): + return ivy.is_int_dtype(x) + + @to_ivy_arrays_and_back def is_floating_point(x): return ivy.is_float_dtype(x)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py\n--- a/ivy/functional/frontends/paddle/tensor/attribute.py\n+++ b/ivy/functional/frontends/paddle/tensor/attribute.py\n@@ -10,6 +10,11 @@\n return ivy.is_complex_dtype(x)\n \n \n+@to_ivy_arrays_and_back\n+def is_integer(x):\n+ return ivy.is_int_dtype(x)\n+\n+\n @to_ivy_arrays_and_back\n def is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "issue": "is_integer\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "path": "ivy/functional/frontends/paddle/tensor/attribute.py"}]}
656
139
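The ivy diff above is a one-function addition, but it illustrates the frontend house pattern: each Paddle-frontend function is a thin delegate to an ivy builtin, wrapped by `to_ivy_arrays_and_back` so frontend array types are converted on the way in and out. A toy mock of that mechanic (the real decorator and `ivy.is_int_dtype` do the heavy lifting):

```python
def to_ivy_arrays_and_back(fn):
    # Toy stand-in for ivy's decorator: convert inputs, call, convert back.
    def wrapped(x):
        return fn(x)  # real version maps frontend arrays <-> ivy arrays
    return wrapped

@to_ivy_arrays_and_back
def is_integer(x):
    # Delegates to the backend-agnostic dtype query, as in the patch.
    return isinstance(x, int)  # ivy.is_int_dtype(x) in the real code

assert is_integer(3) is True
assert is_integer(3.0) is False
```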
gh_patches_debug_28376
rasdani/github-patches
git_diff
iterative__dvc-4075
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement `--no-exec` option for `import-url` command `dvc import-url` creates new `.dvc` file, just as `dvc run`. Sometimes files which would be imported are already present locally and it's quite inconvenient that they should be downloaded again in order to create a pipeline step. Because of that it would be great to add `--no-exec` option: we create pipeline step, then use `dvc commit` to update its md5 with already downloaded file. </issue> <code> [start of dvc/repo/imp_url.py] 1 import os 2 3 from dvc.repo.scm_context import scm_context 4 from dvc.utils import relpath, resolve_output, resolve_paths 5 from dvc.utils.fs import path_isin 6 7 from ..exceptions import OutputDuplicationError 8 from . import locked 9 10 11 @locked 12 @scm_context 13 def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True): 14 from dvc.dvcfile import Dvcfile 15 from dvc.stage import Stage, create_stage 16 17 out = resolve_output(url, out) 18 path, wdir, out = resolve_paths(self, out) 19 20 # NOTE: when user is importing something from within their own repository 21 if ( 22 erepo is None 23 and os.path.exists(url) 24 and path_isin(os.path.abspath(url), self.root_dir) 25 ): 26 url = relpath(url, wdir) 27 28 stage = create_stage( 29 Stage, 30 self, 31 fname or path, 32 wdir=wdir, 33 deps=[url], 34 outs=[out], 35 erepo=erepo, 36 ) 37 38 if stage is None: 39 return None 40 41 dvcfile = Dvcfile(self, stage.path) 42 dvcfile.remove() 43 44 try: 45 self.check_modified_graph([stage]) 46 except OutputDuplicationError as exc: 47 raise OutputDuplicationError(exc.output, set(exc.stages) - {stage}) 48 49 stage.run() 50 51 stage.frozen = frozen 52 53 dvcfile.dump(stage) 54 55 return stage 56 [end of dvc/repo/imp_url.py] [start of dvc/command/imp_url.py] 1 import argparse 2 import logging 3 4 from dvc.command import completion 5 from dvc.command.base import CmdBase, append_doc_link 6 from dvc.exceptions import DvcException 7 8 logger = logging.getLogger(__name__) 9 10 11 class CmdImportUrl(CmdBase): 12 def run(self): 13 try: 14 self.repo.imp_url( 15 self.args.url, out=self.args.out, fname=self.args.file 16 ) 17 except DvcException: 18 logger.exception( 19 "failed to import {}. You could also try downloading " 20 "it manually, and adding it with `dvc add`.".format( 21 self.args.url 22 ) 23 ) 24 return 1 25 return 0 26 27 28 def add_parser(subparsers, parent_parser): 29 IMPORT_HELP = ( 30 "Download or copy file from URL and take it under DVC control." 
31 ) 32 33 import_parser = subparsers.add_parser( 34 "import-url", 35 parents=[parent_parser], 36 description=append_doc_link(IMPORT_HELP, "import-url"), 37 help=IMPORT_HELP, 38 formatter_class=argparse.RawTextHelpFormatter, 39 ) 40 import_parser.add_argument( 41 "url", 42 help="Supported urls:\n" 43 "/path/to/file\n" 44 "/path/to/directory\n" 45 "C:\\\\path\\to\\file\n" 46 "C:\\\\path\\to\\directory\n" 47 "https://example.com/path/to/file\n" 48 "s3://bucket/path/to/file\n" 49 "s3://bucket/path/to/directory\n" 50 "gs://bucket/path/to/file\n" 51 "gs://bucket/path/to/directory\n" 52 "hdfs://example.com/path/to/file\n" 53 "ssh://example.com:/path/to/file\n" 54 "ssh://example.com:/path/to/directory\n" 55 "remote://myremote/path/to/file (see `dvc remote`)", 56 ) 57 import_parser.add_argument( 58 "out", 59 nargs="?", 60 help="Destination path to put files to.", 61 choices=completion.Optional.DIR, 62 ) 63 import_parser.add_argument( 64 "--file", 65 help="Specify name of the DVC-file this command will generate.", 66 metavar="<filename>", 67 choices=completion.Optional.DIR, 68 ) 69 import_parser.set_defaults(func=CmdImportUrl) 70 [end of dvc/command/imp_url.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py --- a/dvc/command/imp_url.py +++ b/dvc/command/imp_url.py @@ -12,7 +12,10 @@ def run(self): try: self.repo.imp_url( - self.args.url, out=self.args.out, fname=self.args.file + self.args.url, + out=self.args.out, + fname=self.args.file, + no_exec=self.args.no_exec, ) except DvcException: logger.exception( @@ -66,4 +69,10 @@ metavar="<filename>", choices=completion.Optional.DIR, ) + import_parser.add_argument( + "--no-exec", + action="store_true", + default=False, + help="Only create stage file without actually download it.", + ) import_parser.set_defaults(func=CmdImportUrl) diff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py --- a/dvc/repo/imp_url.py +++ b/dvc/repo/imp_url.py @@ -10,7 +10,9 @@ @locked @scm_context -def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True): +def imp_url( + self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False +): from dvc.dvcfile import Dvcfile from dvc.stage import Stage, create_stage @@ -46,7 +48,10 @@ except OutputDuplicationError as exc: raise OutputDuplicationError(exc.output, set(exc.stages) - {stage}) - stage.run() + if no_exec: + stage.ignore_outs() + else: + stage.run() stage.frozen = frozen
{"golden_diff": "diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py\n--- a/dvc/command/imp_url.py\n+++ b/dvc/command/imp_url.py\n@@ -12,7 +12,10 @@\n def run(self):\n try:\n self.repo.imp_url(\n- self.args.url, out=self.args.out, fname=self.args.file\n+ self.args.url,\n+ out=self.args.out,\n+ fname=self.args.file,\n+ no_exec=self.args.no_exec,\n )\n except DvcException:\n logger.exception(\n@@ -66,4 +69,10 @@\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n+ import_parser.add_argument(\n+ \"--no-exec\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Only create stage file without actually download it.\",\n+ )\n import_parser.set_defaults(func=CmdImportUrl)\ndiff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py\n--- a/dvc/repo/imp_url.py\n+++ b/dvc/repo/imp_url.py\n@@ -10,7 +10,9 @@\n \n @locked\n @scm_context\n-def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n+def imp_url(\n+ self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False\n+):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n \n@@ -46,7 +48,10 @@\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n \n- stage.run()\n+ if no_exec:\n+ stage.ignore_outs()\n+ else:\n+ stage.run()\n \n stage.frozen = frozen\n", "issue": "Implement `--no-exec` option for `import-url` command\n`dvc import-url` creates new `.dvc` file, just as `dvc run`. Sometimes files which would be imported are already present locally and it's quite inconvenient that they should be downloaded again in order to create a pipeline step.\r\n\r\nBecause of that it would be great to add `--no-exec` option: we create pipeline step, then use `dvc commit` to update its md5 with already downloaded file.\n", "before_files": [{"content": "import os\n\nfrom dvc.repo.scm_context import scm_context\nfrom dvc.utils import relpath, resolve_output, resolve_paths\nfrom dvc.utils.fs import path_isin\n\nfrom ..exceptions import OutputDuplicationError\nfrom . import locked\n\n\n@locked\n@scm_context\ndef imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n\n out = resolve_output(url, out)\n path, wdir, out = resolve_paths(self, out)\n\n # NOTE: when user is importing something from within their own repository\n if (\n erepo is None\n and os.path.exists(url)\n and path_isin(os.path.abspath(url), self.root_dir)\n ):\n url = relpath(url, wdir)\n\n stage = create_stage(\n Stage,\n self,\n fname or path,\n wdir=wdir,\n deps=[url],\n outs=[out],\n erepo=erepo,\n )\n\n if stage is None:\n return None\n\n dvcfile = Dvcfile(self, stage.path)\n dvcfile.remove()\n\n try:\n self.check_modified_graph([stage])\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n\n stage.run()\n\n stage.frozen = frozen\n\n dvcfile.dump(stage)\n\n return stage\n", "path": "dvc/repo/imp_url.py"}, {"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImportUrl(CmdBase):\n def run(self):\n try:\n self.repo.imp_url(\n self.args.url, out=self.args.out, fname=self.args.file\n )\n except DvcException:\n logger.exception(\n \"failed to import {}. 
You could also try downloading \"\n \"it manually, and adding it with `dvc add`.\".format(\n self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download or copy file from URL and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import-url\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import-url\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\n \"url\",\n help=\"Supported urls:\\n\"\n \"/path/to/file\\n\"\n \"/path/to/directory\\n\"\n \"C:\\\\\\\\path\\\\to\\\\file\\n\"\n \"C:\\\\\\\\path\\\\to\\\\directory\\n\"\n \"https://example.com/path/to/file\\n\"\n \"s3://bucket/path/to/file\\n\"\n \"s3://bucket/path/to/directory\\n\"\n \"gs://bucket/path/to/file\\n\"\n \"gs://bucket/path/to/directory\\n\"\n \"hdfs://example.com/path/to/file\\n\"\n \"ssh://example.com:/path/to/file\\n\"\n \"ssh://example.com:/path/to/directory\\n\"\n \"remote://myremote/path/to/file (see `dvc remote`)\",\n )\n import_parser.add_argument(\n \"out\",\n nargs=\"?\",\n help=\"Destination path to put files to.\",\n choices=completion.Optional.DIR,\n )\n import_parser.add_argument(\n \"--file\",\n help=\"Specify name of the DVC-file this command will generate.\",\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n import_parser.set_defaults(func=CmdImportUrl)\n", "path": "dvc/command/imp_url.py"}]}
1,711
419
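The dvc patch above threads a boolean CLI flag through to the repo method, which then skips `stage.run()`. The same shape with plain argparse, using placeholder names:

```python
import argparse

def imp_url(url, no_exec=False):
    # Stand-in for dvc's create_stage + run: when no_exec is set, the
    # stage is recorded but nothing is downloaded (a later `dvc commit`
    # fills in checksums, per the issue).
    stage = {"deps": [url], "executed": False}
    if not no_exec:
        stage["executed"] = True  # stage.run() would fetch here
    return stage

parser = argparse.ArgumentParser(prog="import-url")
parser.add_argument("url")
parser.add_argument("--no-exec", action="store_true", default=False,
                    help="Only create the stage file; do not download.")
args = parser.parse_args(["s3://bucket/data.csv", "--no-exec"])
assert imp_url(args.url, no_exec=args.no_exec)["executed"] is False
```

The merged patch additionally calls `stage.ignore_outs()` in the no-exec branch, so declared outputs are gitignored even though nothing was downloaded.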
gh_patches_debug_50783
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3282
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error in NL API, 'Sentiment' object has no attribute 'score' Upon executing the following code on GCE & locally I'm getting 'score' not found error locally while it works on GCE. ``` def analyze_text(text): language_client = language.Client() document = language_client.document_from_text(text) annotations = document.annotate_text(include_sentiment=True) print(annotations.sentiment.score) print (annotations.sentiment.magnitude) ``` Only difference I can find is on GCE, the gcloud version is different than the local **GCE gcloud** gcloud --version Google Cloud SDK 145.0.0 alpha 2017.02.21 app-engine-python 1.9.50 beta 2017.02.21 bq 2.0.24 bq-nix 2.0.24 core 2017.02.21 core-nix 2017.02.21 gcloud gcloud-deps 2017.02.21 gcloud-deps-linux-x86_64 2017.02.21 gsutil 4.22 gsutil-nix 4.22 **Local gcloud** gcloud --version Google Cloud SDK 148.0.1 bq 2.0.24 bq-nix 2.0.24 core 2017.03.24 core-nix 2016.11.07 gcloud gcloud-deps 2017.03.17 gcloud-deps-darwin-x86_64 2017.02.21 gsutil 4.23 gsutil-nix 4.19 Debugging locally, the response from NL API consist of polarity property and not score. <img width="761" alt="image" src="https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png"> </issue> <code> [start of language/google/cloud/language/sentiment.py] 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Definition for Google Cloud Natural Language API sentiment. 16 17 Sentiment is the response to an ``analyzeSentiment`` request. 18 """ 19 20 21 class Sentiment(object): 22 """A Google Cloud Natural Language API sentiment object. 23 24 .. _Sentiment message: https://cloud.google.com/natural-language/\ 25 reference/rest/v1/Sentiment 26 .. _Sentiment basics: https://cloud.google.com/natural-language/\ 27 docs/basics#sentiment-analysis-values 28 29 See `Sentiment message`_ and `Sentiment basics`_. 30 31 :type score: float 32 :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range. 33 Larger numbers represent more positive sentiments. 34 35 :type magnitude: float 36 :param magnitude: A non-negative number in the ``[0, +inf)`` range, which 37 represents the absolute magnitude of sentiment 38 regardless of score (positive or negative). 39 """ 40 41 def __init__(self, score, magnitude): 42 self.score = score 43 self.magnitude = magnitude 44 45 @classmethod 46 def from_api_repr(cls, payload): 47 """Convert a Sentiment from the JSON API into a :class:`Sentiment`. 48 49 :param payload: dict 50 :type payload: The value from the backend. 51 52 :rtype: :class:`Sentiment` 53 :returns: The sentiment parsed from the API representation. 
54 """ 55 score = payload['score'] 56 magnitude = payload['magnitude'] 57 return cls(score, magnitude) 58 [end of language/google/cloud/language/sentiment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py --- a/language/google/cloud/language/sentiment.py +++ b/language/google/cloud/language/sentiment.py @@ -52,6 +52,6 @@ :rtype: :class:`Sentiment` :returns: The sentiment parsed from the API representation. """ - score = payload['score'] + score = payload.get('score', None) magnitude = payload['magnitude'] return cls(score, magnitude)
{"golden_diff": "diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py\n--- a/language/google/cloud/language/sentiment.py\n+++ b/language/google/cloud/language/sentiment.py\n@@ -52,6 +52,6 @@\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n- score = payload['score']\n+ score = payload.get('score', None)\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "issue": "Error in NL API, 'Sentiment' object has no attribute 'score'\nUpon executing the following code on GCE & locally I'm getting 'score' not found error locally while it works on GCE.\r\n\r\n```\r\ndef analyze_text(text):\r\n language_client = language.Client()\r\n document = language_client.document_from_text(text)\r\n annotations = document.annotate_text(include_sentiment=True)\r\n print(annotations.sentiment.score)\r\n print (annotations.sentiment.magnitude)\r\n```\r\n\r\nOnly difference I can find is on GCE, the gcloud version is different than the local\r\n**GCE gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 145.0.0\r\nalpha 2017.02.21\r\napp-engine-python 1.9.50\r\nbeta 2017.02.21\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.02.21\r\ncore-nix 2017.02.21\r\ngcloud \r\ngcloud-deps 2017.02.21\r\ngcloud-deps-linux-x86_64 2017.02.21\r\ngsutil 4.22\r\ngsutil-nix 4.22\r\n\r\n**Local gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 148.0.1\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.03.24\r\ncore-nix 2016.11.07\r\ngcloud \r\ngcloud-deps 2017.03.17\r\ngcloud-deps-darwin-x86_64 2017.02.21\r\ngsutil 4.23\r\ngsutil-nix 4.19\r\n\r\nDebugging locally, the response from NL API consist of polarity property and not score.\r\n\r\n<img width=\"761\" alt=\"image\" src=\"https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png\">\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API sentiment.\n\nSentiment is the response to an ``analyzeSentiment`` request.\n\"\"\"\n\n\nclass Sentiment(object):\n \"\"\"A Google Cloud Natural Language API sentiment object.\n\n .. _Sentiment message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Sentiment\n .. 
_Sentiment basics: https://cloud.google.com/natural-language/\\\n docs/basics#sentiment-analysis-values\n\n See `Sentiment message`_ and `Sentiment basics`_.\n\n :type score: float\n :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.\n Larger numbers represent more positive sentiments.\n\n :type magnitude: float\n :param magnitude: A non-negative number in the ``[0, +inf)`` range, which\n represents the absolute magnitude of sentiment\n regardless of score (positive or negative).\n \"\"\"\n\n def __init__(self, score, magnitude):\n self.score = score\n self.magnitude = magnitude\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a Sentiment from the JSON API into a :class:`Sentiment`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n score = payload['score']\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "path": "language/google/cloud/language/sentiment.py"}]}
1,615
114
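The google-cloud-python fix above is a single defensive lookup: `payload['score']` becomes `payload.get('score', None)`, so sentiment payloads in the older API shape, which carried `polarity` rather than `score` per the issue's screenshot, parse instead of raising KeyError. In isolation:

```python
def sentiment_from_api_repr(payload):
    # Older Natural Language responses have "polarity", not "score";
    # tolerate both shapes rather than assuming the newer one.
    score = payload.get("score", None)
    magnitude = payload["magnitude"]
    return score, magnitude

assert sentiment_from_api_repr({"magnitude": 0.4}) == (None, 0.4)
assert sentiment_from_api_repr({"polarity": 1, "magnitude": 0.4}) == (None, 0.4)
assert sentiment_from_api_repr({"score": 0.8, "magnitude": 0.4}) == (0.8, 0.4)
```

A fuller fix might map the legacy field onto the new one, e.g. `payload.get('score', payload.get('polarity'))`, but the merged patch opts for the minimal None default.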
gh_patches_debug_35745
rasdani/github-patches
git_diff
uclapi__uclapi-977
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Webhooks They haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. The room data is definitely up to date, however. </issue> <code> [start of backend/uclapi/roombookings/management/commands/trigger_webhooks.py] 1 from django.core.management.base import BaseCommand 2 from roombookings.models import Lock, BookingA, BookingB 3 from roombookings.helpers import _serialize_bookings 4 from dashboard.models import Webhook, WebhookTriggerHistory 5 from datetime import datetime 6 from deepdiff import DeepDiff 7 import grequests 8 from django.utils import timezone 9 10 11 class Command(BaseCommand): 12 13 help = 'Diff roombooking result sets and notify relevant webhooks' 14 15 def handle(self, *args, **options): 16 self.stdout.write("Triggering webhooks") 17 18 # currently locked table is the old one, more recent one is not locked 19 lock = Lock.objects.all()[0] # there is only ever one lock 20 21 if lock.bookingA: 22 old_booking_table = BookingA 23 new_booking_table = BookingB 24 else: 25 old_booking_table = BookingB 26 new_booking_table = BookingA 27 28 now = datetime.now() 29 30 old_bookings = _serialize_bookings( 31 old_booking_table.objects.filter( 32 startdatetime__gt=now 33 ) 34 ) 35 new_bookings = _serialize_bookings( 36 new_booking_table.objects.filter( 37 startdatetime__gt=now 38 ) 39 ) 40 41 ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True) 42 43 webhooks = Webhook.objects.all() 44 # assumption: list of webhooks will be longer than ddiff 45 46 num_bookings_added = 0 47 num_bookings_removed = 0 48 if "iterable_item_added" in ddiff: 49 num_bookings_added = len( 50 ddiff["iterable_item_added"].values() 51 ) 52 53 if "iterable_item_removed" in ddiff: 54 num_bookings_removed = len( 55 ddiff["iterable_item_removed"].values() 56 ) 57 58 self.stdout.write( 59 "{} bookings added\n{} bookings removed.".format( 60 num_bookings_added, 61 num_bookings_removed 62 ) 63 ) 64 65 def webhook_map(webhook): 66 def webhook_filter(booking): 67 return ( 68 ( 69 webhook.siteid == '' or 70 booking["siteid"] == webhook.siteid 71 ) and 72 ( 73 webhook.roomid == '' or 74 booking["roomid"] == webhook.roomid 75 ) and 76 ( 77 webhook.contact == '' or 78 # mimick SQL 'like' 79 webhook.contact in str(booking["contact"]) 80 ) 81 ) 82 output = { 83 "webhook_in_db": webhook, 84 "url": webhook.url, 85 "verification_secret": webhook.verification_secret 86 } 87 if "iterable_item_added" in ddiff: 88 bookings_added = list(filter( 89 webhook_filter, ddiff["iterable_item_added"].values() 90 )) 91 if bookings_added != []: 92 output["bookings_added"] = bookings_added 93 if "iterable_item_removed" in ddiff: 94 bookings_removed = list(filter( 95 webhook_filter, ddiff["iterable_item_removed"].values() 96 )) 97 if bookings_removed != []: 98 output["bookings_removed"] = bookings_removed 99 100 return output 101 102 webhooks_to_enact = list(map(webhook_map, webhooks)) 103 104 unsent_requests = [] 105 for idx, webhook in enumerate(webhooks_to_enact): 106 payload = { 107 "service": "roombookings", 108 "name": "bookings_changed", 109 "verification_secret": webhook["verification_secret"], 110 "content": {} 111 } 112 113 if "bookings_added" in webhook: 114 payload["content"]["bookings_added"] = ( 115 webhook["bookings_added"] 116 ) 117 if "bookings_removed" in webhook: 118 payload["content"]["bookings_removed"] = ( 119 webhook["bookings_removed"] 120 ) 121 122 
webhooks_to_enact[idx]["payload"] = payload 123 124 if payload["content"] != {}: 125 unsent_requests.append( 126 grequests.post( 127 webhook["url"], json=payload, headers={ 128 "User-Agent": "uclapi-bot/1" 129 } 130 ) 131 ) 132 self.stdout.write( 133 "Triggering {} webhooks.".format(len(unsent_requests)) 134 ) 135 grequests.map(unsent_requests) 136 137 for webhook in webhooks_to_enact: 138 if webhook["payload"]["content"] != {}: 139 webhook_in_db = webhook["webhook_in_db"] 140 webhook_in_db.last_fired = timezone.now() 141 webhook_in_db.save() 142 143 new_webhook_history_entry = WebhookTriggerHistory( 144 webhook=webhook_in_db, 145 payload=webhook["payload"] 146 ) 147 new_webhook_history_entry.save() 148 149 self.stdout.write("Webhooks triggered.") 150 [end of backend/uclapi/roombookings/management/commands/trigger_webhooks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py --- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py +++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py @@ -4,16 +4,25 @@ from dashboard.models import Webhook, WebhookTriggerHistory from datetime import datetime from deepdiff import DeepDiff -import grequests from django.utils import timezone +from requests_futures.sessions import FuturesSession class Command(BaseCommand): help = 'Diff roombooking result sets and notify relevant webhooks' + def add_arguments(self, parser): + parser.add_argument( + '--debug', + action='store_true', + dest='debug', + help='Print webhook responses', + ) + def handle(self, *args, **options): self.stdout.write("Triggering webhooks") + session = FuturesSession() # currently locked table is the old one, more recent one is not locked lock = Lock.objects.all()[0] # there is only ever one lock @@ -121,10 +130,11 @@ webhooks_to_enact[idx]["payload"] = payload - if payload["content"] != {}: + if payload["content"] != {} and webhook["url"] != "": unsent_requests.append( - grequests.post( - webhook["url"], json=payload, headers={ + session.post( + webhook["url"], json=payload, + headers={ "User-Agent": "uclapi-bot/1" } ) @@ -132,7 +142,11 @@ self.stdout.write( "Triggering {} webhooks.".format(len(unsent_requests)) ) - grequests.map(unsent_requests) + if("debug" in options): + for i in unsent_requests: + self.stdout.write( + 'response status {0}'.format(i.result().status_code) + ) for webhook in webhooks_to_enact: if webhook["payload"]["content"] != {}:
{"golden_diff": "diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n@@ -4,16 +4,25 @@\n from dashboard.models import Webhook, WebhookTriggerHistory\n from datetime import datetime\n from deepdiff import DeepDiff\n-import grequests\n from django.utils import timezone\n+from requests_futures.sessions import FuturesSession\n \n \n class Command(BaseCommand):\n \n help = 'Diff roombooking result sets and notify relevant webhooks'\n \n+ def add_arguments(self, parser):\n+ parser.add_argument(\n+ '--debug',\n+ action='store_true',\n+ dest='debug',\n+ help='Print webhook responses',\n+ )\n+\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n+ session = FuturesSession()\n \n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n@@ -121,10 +130,11 @@\n \n webhooks_to_enact[idx][\"payload\"] = payload\n \n- if payload[\"content\"] != {}:\n+ if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n- grequests.post(\n- webhook[\"url\"], json=payload, headers={\n+ session.post(\n+ webhook[\"url\"], json=payload,\n+ headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n@@ -132,7 +142,11 @@\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n- grequests.map(unsent_requests)\n+ if(\"debug\" in options):\n+ for i in unsent_requests:\n+ self.stdout.write(\n+ 'response status {0}'.format(i.result().status_code)\n+ )\n \n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n", "issue": "Webhooks\nThey haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. 
The room data is definitely up to date, however.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import Lock, BookingA, BookingB\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nimport grequests\nfrom django.utils import timezone\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n\n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if lock.bookingA:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.all()\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {}:\n unsent_requests.append(\n grequests.post(\n webhook[\"url\"], json=payload, headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n grequests.map(unsent_requests)\n\n for webhook in webhooks_to_enact:\n if 
webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}]}
1,937
496
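The accepted fix in the record above replaces `grequests` with `requests-futures` and adds a `--debug` option that prints each webhook response status. A minimal sketch of that pattern follows; the `hooks` list and URL are placeholders standing in for the real `dashboard.models.Webhook` records, not values from the uclapi codebase.

```python
from requests_futures.sessions import FuturesSession

session = FuturesSession()
# Placeholder webhook records; the real ones come from dashboard.models.Webhook.
hooks = [{"url": "https://example.com/hook", "payload": {"content": {"changed": True}}}]

futures = [
    session.post(h["url"], json=h["payload"], headers={"User-Agent": "uclapi-bot/1"})
    for h in hooks
    if h["payload"]["content"] != {} and h["url"] != ""  # same guards as the patch
]
for f in futures:
    # This is what the new --debug flag prints for each webhook.
    print("response status", f.result().status_code)
```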
gh_patches_debug_34858
rasdani/github-patches
git_diff
ansible-collections__amazon.aws-430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws_service_ip_ranges suppport for ipv6 ### Summary We are using amazon.aws collection and we noticed that the aws_service_ip_ranges does not have an option to return IPv6 ranges. ### Issue Type Feature Idea ### Component Name `{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }` Should return a list of IPv6 addresses that correspond to the Route53 health check. ### Pull Request #430 ### Additional Information <!--- Paste example playbooks or commands between quotes below --> ``` vars: rt53_ranges: "{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}" tasks: - name: "use list return option and iterate as a loop" debug: msg="{% for x in rt53_ranges %}{{ x }} {% endfor %}" # ###"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56" ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct </issue> <code> [start of plugins/lookup/aws_service_ip_ranges.py] 1 # (c) 2016 James Turner <[email protected]> 2 # (c) 2017 Ansible Project 3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 4 from __future__ import (absolute_import, division, print_function) 5 __metaclass__ = type 6 7 DOCUMENTATION = ''' 8 lookup: aws_service_ip_ranges 9 author: 10 - James Turner <[email protected]> 11 requirements: 12 - must have public internet connectivity 13 short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3. 14 description: 15 - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking. 16 - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service. 17 options: 18 service: 19 description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS' 20 region: 21 description: 'The AWS region to narrow the ranges to. 
Examples: us-east-1, eu-west-2, ap-southeast-1' 22 ''' 23 24 EXAMPLES = """ 25 vars: 26 ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}" 27 tasks: 28 29 - name: "use list return option and iterate as a loop" 30 debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" 31 # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " 32 33 - name: "Pull S3 IP ranges, and print the default return style" 34 debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" 35 # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" 36 """ 37 38 RETURN = """ 39 _raw: 40 description: comma-separated list of CIDR ranges 41 """ 42 43 44 import json 45 46 from ansible.errors import AnsibleError 47 from ansible.module_utils.six.moves.urllib.error import HTTPError 48 from ansible.module_utils.six.moves.urllib.error import URLError 49 from ansible.module_utils._text import to_native 50 from ansible.module_utils.urls import ConnectionError 51 from ansible.module_utils.urls import open_url 52 from ansible.module_utils.urls import SSLValidationError 53 from ansible.plugins.lookup import LookupBase 54 55 56 class LookupModule(LookupBase): 57 def run(self, terms, variables, **kwargs): 58 try: 59 resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') 60 amazon_response = json.load(resp)['prefixes'] 61 except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: 62 # on Python 3+, json.decoder.JSONDecodeError is raised for bad 63 # JSON. On 2.x it's a ValueError 64 raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e)) 65 except HTTPError as e: 66 raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e)) 67 except SSLValidationError as e: 68 raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e)) 69 except URLError as e: 70 raise AnsibleError("Failed look up IP range service: %s" % to_native(e)) 71 except ConnectionError as e: 72 raise AnsibleError("Error connecting to IP range service: %s" % to_native(e)) 73 74 if 'region' in kwargs: 75 region = kwargs['region'] 76 amazon_response = (item for item in amazon_response if item['region'] == region) 77 if 'service' in kwargs: 78 service = str.upper(kwargs['service']) 79 amazon_response = (item for item in amazon_response if item['service'] == service) 80 81 return [item['ip_prefix'] for item in amazon_response] 82 [end of plugins/lookup/aws_service_ip_ranges.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py --- a/plugins/lookup/aws_service_ip_ranges.py +++ b/plugins/lookup/aws_service_ip_ranges.py @@ -19,6 +19,9 @@ description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS' region: description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1' + ipv6_prefixes: + description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses' + version_added: 2.1.0 ''' EXAMPLES = """ @@ -40,7 +43,6 @@ description: comma-separated list of CIDR ranges """ - import json from ansible.errors import AnsibleError @@ -55,9 +57,16 @@ class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): + if "ipv6_prefixes" in kwargs and kwargs["ipv6_prefixes"]: + prefixes_label = "ipv6_prefixes" + ip_prefix_label = "ipv6_prefix" + else: + prefixes_label = "prefixes" + ip_prefix_label = "ip_prefix" + try: resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') - amazon_response = json.load(resp)['prefixes'] + amazon_response = json.load(resp)[prefixes_label] except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: # on Python 3+, json.decoder.JSONDecodeError is raised for bad # JSON. On 2.x it's a ValueError @@ -77,5 +86,5 @@ if 'service' in kwargs: service = str.upper(kwargs['service']) amazon_response = (item for item in amazon_response if item['service'] == service) - - return [item['ip_prefix'] for item in amazon_response] + iprange = [item[ip_prefix_label] for item in amazon_response] + return iprange
{"golden_diff": "diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py\n--- a/plugins/lookup/aws_service_ip_ranges.py\n+++ b/plugins/lookup/aws_service_ip_ranges.py\n@@ -19,6 +19,9 @@\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n+ ipv6_prefixes:\n+ description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'\n+ version_added: 2.1.0\n '''\n \n EXAMPLES = \"\"\"\n@@ -40,7 +43,6 @@\n description: comma-separated list of CIDR ranges\n \"\"\"\n \n-\n import json\n \n from ansible.errors import AnsibleError\n@@ -55,9 +57,16 @@\n \n class LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n+ if \"ipv6_prefixes\" in kwargs and kwargs[\"ipv6_prefixes\"]:\n+ prefixes_label = \"ipv6_prefixes\"\n+ ip_prefix_label = \"ipv6_prefix\"\n+ else:\n+ prefixes_label = \"prefixes\"\n+ ip_prefix_label = \"ip_prefix\"\n+\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n- amazon_response = json.load(resp)['prefixes']\n+ amazon_response = json.load(resp)[prefixes_label]\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. On 2.x it's a ValueError\n@@ -77,5 +86,5 @@\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n-\n- return [item['ip_prefix'] for item in amazon_response]\n+ iprange = [item[ip_prefix_label] for item in amazon_response]\n+ return iprange\n", "issue": "aws_service_ip_ranges suppport for ipv6\n### Summary\r\n\r\nWe are using amazon.aws collection and we noticed that the aws_service_ip_ranges does not have an option to return IPv6 ranges.\r\n\r\n### Issue Type\r\n\r\nFeature Idea\r\n\r\n### Component Name\r\n\r\n`{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }`\r\nShould return a list of IPv6 addresses that correspond to the Route53 health check. 
\r\n\r\n\r\n### Pull Request\r\n#430\r\n\r\n### Additional Information\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\nvars:\r\n rt53_ranges: \"{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}\"\r\ntasks:\r\n\r\n- name: \"use list return option and iterate as a loop\"\r\n debug: msg=\"{% for x in rt53_ranges %}{{ x }} {% endfor %}\"\r\n# ###\"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56\"\r\n```\r\n\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# (c) 2016 James Turner <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nlookup: aws_service_ip_ranges\nauthor:\n - James Turner <[email protected]>\nrequirements:\n - must have public internet connectivity\nshort_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.\ndescription:\n - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.\n - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.\noptions:\n service:\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n'''\n\nEXAMPLES = \"\"\"\nvars:\n ec2_ranges: \"{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}\"\ntasks:\n\n- name: \"use list return option and iterate as a loop\"\n debug: msg=\"{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}\"\n# \"52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 \"\n\n- name: \"Pull S3 IP ranges, and print the default return style\"\n debug: msg=\"{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}\"\n# \"52.92.16.0/20,52.216.0.0/15,54.231.0.0/17\"\n\"\"\"\n\nRETURN = \"\"\"\n_raw:\n description: comma-separated list of CIDR ranges\n\"\"\"\n\n\nimport json\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils.six.moves.urllib.error import URLError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.urls import ConnectionError\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.urls import SSLValidationError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n amazon_response = json.load(resp)['prefixes']\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. 
On 2.x it's a ValueError\n raise AnsibleError(\"Could not decode AWS IP ranges: %s\" % to_native(e))\n except HTTPError as e:\n raise AnsibleError(\"Received HTTP error while pulling IP ranges: %s\" % to_native(e))\n except SSLValidationError as e:\n raise AnsibleError(\"Error validating the server's certificate for: %s\" % to_native(e))\n except URLError as e:\n raise AnsibleError(\"Failed look up IP range service: %s\" % to_native(e))\n except ConnectionError as e:\n raise AnsibleError(\"Error connecting to IP range service: %s\" % to_native(e))\n\n if 'region' in kwargs:\n region = kwargs['region']\n amazon_response = (item for item in amazon_response if item['region'] == region)\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n\n return [item['ip_prefix'] for item in amazon_response]\n", "path": "plugins/lookup/aws_service_ip_ranges.py"}]}
1,959
500
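The patch keys the lookup on either the `prefixes`/`ip_prefix` or `ipv6_prefixes`/`ipv6_prefix` pair in AWS's published `ip-ranges.json`. The same selection logic can be sketched outside Ansible; the region and service values below simply mirror the example from the issue.

```python
import json
from urllib.request import urlopen

ipv6 = True  # mirrors passing ipv6_prefixes=True to the lookup
prefixes_label = "ipv6_prefixes" if ipv6 else "prefixes"
ip_prefix_label = "ipv6_prefix" if ipv6 else "ip_prefix"

with urlopen("https://ip-ranges.amazonaws.com/ip-ranges.json") as resp:
    entries = json.load(resp)[prefixes_label]

ranges = [
    e[ip_prefix_label]
    for e in entries
    if e["region"] == "us-west-2" and e["service"] == "ROUTE53_HEALTHCHECKS"
]
print(ranges)  # IPv6 CIDR blocks such as the 2600:1f14:...::/56 pair in the issue
```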
gh_patches_debug_57019
rasdani/github-patches
git_diff
fidals__shopelectro-415
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Убери пункт меню Убери mp3 колонки из меню. Их больше не будет. Я зашел в админку и выключил категорию. Надеюсь правильно ) http://prntscr.com/k553lt </issue> <code> [start of shopelectro/templatetags/se_extras.py] 1 import datetime 2 import math 3 4 from django import template 5 from django.conf import settings 6 from django.contrib.humanize.templatetags.humanize import intcomma 7 from django.template.defaultfilters import floatformat 8 from django.urls import reverse 9 10 from images.models import ImageMixin 11 from pages.models import Page 12 13 from shopelectro import config 14 from shopelectro.models import Category 15 16 register = template.Library() 17 18 19 # TODO - move it in catalog. Inspired by lp_electric 20 @register.simple_tag 21 def roots(): 22 return sorted( 23 Category.objects 24 .select_related('page') 25 .get_cached_trees(), # https://goo.gl/rFKiku 26 key=lambda x: x.page.position 27 ) 28 29 30 @register.simple_tag 31 def footer_links(): 32 return config.FOOTER_LINKS 33 34 35 # TODO - move in pages. Inspired by LP electric 36 @register.filter 37 def class_name(model): 38 """Return Model name.""" 39 return type(model).__name__ 40 41 42 @register.simple_tag 43 def time_to_call(): 44 def is_weekend(t): 45 return t.weekday() > 4 46 47 def is_friday(t): 48 return t.weekday() == 4 49 50 def not_yet_opened(t): 51 current_time = (t.hour, t.minute) 52 open_time = (10, 00) 53 return current_time < open_time and not is_weekend(t) 54 55 def is_closed(t): 56 current_time = (t.hour, t.minute) 57 closing_time = (16, 30) if is_friday(t) else (17, 30) 58 return current_time > closing_time 59 60 when_we_call = { 61 lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): 'В понедельник в 10:30', 62 lambda now: not_yet_opened(now): 'Сегодня в 10:30', 63 lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): 'Завтра в 10:30', 64 lambda _: True: 'В течение 30 минут' 65 } 66 67 time_ = datetime.datetime.now() 68 call = ' позвонит менеджер и обсудит детали доставки.' 69 for condition, time in when_we_call.items(): 70 if condition(time_): 71 return time + call 72 73 74 # TODO - move it in pages. 75 @register.simple_tag 76 def full_url(url_name, *args): 77 return settings.BASE_URL + reverse(url_name, args=args) 78 79 80 @register.filter 81 def humanize_price(price): 82 return intcomma(floatformat(price, 0)) 83 84 85 # Not good code, but duker at 06/10/2016 don't know how to fix it. 86 # It makes Image model very complex. 
87 @register.simple_tag 88 def get_img_alt(entity: ImageMixin): 89 product_alt = 'Фотография {}' 90 logo_alt = 'Логотип компании Shopelectro' 91 92 if not isinstance(entity, Page): 93 return logo_alt 94 95 # try one of this attributes to get pages name 96 name_attrs = ['h1', 'title', 'name'] 97 entity_name = next( 98 getattr(entity, attr) 99 for attr in name_attrs 100 if getattr(entity, attr) 101 ) 102 return product_alt.format(entity_name) 103 104 105 @register.simple_tag 106 def main_image_or_logo(page: Page): 107 """Used for microdata.""" 108 if hasattr(page, 'main_image') and page.main_image: 109 return page.main_image.url 110 else: 111 return settings.STATIC_URL + 'images/logo.png' 112 113 114 @register.inclusion_tag('catalog/product_feedbacks_icons.html') 115 def icon_stars(rating=0): 116 """Render set of rating icons based on 1 through 5 rating values.""" 117 full_icons = int(math.floor(rating)) 118 half_icons = 0 if rating == int(rating) else 1 119 empty_icons = 5 - full_icons - half_icons 120 121 return { 122 'full_icons': range(full_icons), 123 'half_icons': range(half_icons), 124 'empty_icons': range(empty_icons), 125 } 126 [end of shopelectro/templatetags/se_extras.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py --- a/shopelectro/templatetags/se_extras.py +++ b/shopelectro/templatetags/se_extras.py @@ -20,10 +20,13 @@ @register.simple_tag def roots(): return sorted( - Category.objects - .select_related('page') - .get_cached_trees(), # https://goo.gl/rFKiku - key=lambda x: x.page.position + filter( + lambda x: x.page.is_active, + Category.objects # https://goo.gl/rFKiku + .select_related('page') + .get_cached_trees() + ), + key=lambda x: x.page.position, )
{"golden_diff": "diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py\n--- a/shopelectro/templatetags/se_extras.py\n+++ b/shopelectro/templatetags/se_extras.py\n@@ -20,10 +20,13 @@\n @register.simple_tag\n def roots():\n return sorted(\n- Category.objects\n- .select_related('page')\n- .get_cached_trees(), # https://goo.gl/rFKiku\n- key=lambda x: x.page.position\n+ filter(\n+ lambda x: x.page.is_active,\n+ Category.objects # https://goo.gl/rFKiku\n+ .select_related('page')\n+ .get_cached_trees()\n+ ),\n+ key=lambda x: x.page.position,\n )\n", "issue": "\u0423\u0431\u0435\u0440\u0438 \u043f\u0443\u043d\u043a\u0442 \u043c\u0435\u043d\u044e\n\u0423\u0431\u0435\u0440\u0438 mp3 \u043a\u043e\u043b\u043e\u043d\u043a\u0438 \u0438\u0437 \u043c\u0435\u043d\u044e. \u0418\u0445 \u0431\u043e\u043b\u044c\u0448\u0435 \u043d\u0435 \u0431\u0443\u0434\u0435\u0442.\r\n\u042f \u0437\u0430\u0448\u0435\u043b \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0438 \u0432\u044b\u043a\u043b\u044e\u0447\u0438\u043b \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e. \u041d\u0430\u0434\u0435\u044e\u0441\u044c \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e ) \r\nhttp://prntscr.com/k553lt\n", "before_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n Category.objects\n .select_related('page')\n .get_cached_trees(), # https://goo.gl/rFKiku\n key=lambda x: x.page.position\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. 
Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}]}
1,793
189
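Stripped of the Django specifics, the change to `roots()` is just an `is_active` filter applied before the existing position sort; a sketch with the same shape:

```python
def roots(categories):
    """Return active categories ordered by their page position."""
    return sorted(
        (c for c in categories if c.page.is_active),  # the new filter
        key=lambda c: c.page.position,                # the existing ordering
    )
```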
gh_patches_debug_9311
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-3578
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pyside2 looking different from when running as script. Hi. Im using latest development version. As you can see when i run my test script: ``` from PySide2 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 600) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.gridLayout = QtWidgets.QGridLayout(self.centralwidget) self.gridLayout.setObjectName("gridLayout") self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setObjectName("tabWidget") self.tab = QtWidgets.QWidget() self.tab.setObjectName("tab") self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34)) self.pushButton.setObjectName("pushButton") self.tableWidget = QtWidgets.QTableWidget(self.tab) self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192)) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(3) self.tableWidget.setRowCount(0) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) self.tabWidget.addTab(self.tab, "") self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName("tab_2") self.tabWidget.addTab(self.tab_2, "") self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.pushButton.setText(_translate("MainWindow", "PushButton")) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate("MainWindow", "New Column")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate("MainWindow", "New Column")) item = self.tableWidget.horizontalHeaderItem(2) item.setText(_translate("MainWindow", "New Column")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_()) ``` from console: ![pyside2](https://user-images.githubusercontent.com/28787979/37112961-3d2cc8c8-2244-11e8-95f7-610edfc777a9.png) and as the binary pyinstaller creates: ![pyside2withpyinstaller](https://user-images.githubusercontent.com/28787979/37112962-3d4e5b00-2244-11e8-95d4-54ce0661d262.png) It looks very different(ignore the blue line, that's something that happened recently on this comp for some reason but the problem existed previous to this). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can i fix this? 
Ideally it would be possible to bundle my theme "breeze dark" with the application so the user has the ability to use that. </issue> <code> [start of PyInstaller/hooks/hook-PySide2.QtGui.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2018, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 10 from PyInstaller.utils.hooks import qt_plugins_binaries 11 from PyInstaller.compat import is_linux 12 13 hiddenimports = ['PySide2.QtCore'] 14 15 binaries = [] 16 binaries.extend(qt_plugins_binaries('accessible', namespace='PySide2')) 17 binaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2')) 18 binaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2')) 19 binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2')) 20 binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2')) 21 binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2')) 22 23 if is_linux: 24 binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2')) 25 [end of PyInstaller/hooks/hook-PySide2.QtGui.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py --- a/PyInstaller/hooks/hook-PySide2.QtGui.py +++ b/PyInstaller/hooks/hook-PySide2.QtGui.py @@ -19,6 +19,7 @@ binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2')) binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2')) binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2')) +binaries.extend(qt_plugins_binaries('styles', namespace='PySide2')) if is_linux: binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py\n--- a/PyInstaller/hooks/hook-PySide2.QtGui.py\n+++ b/PyInstaller/hooks/hook-PySide2.QtGui.py\n@@ -19,6 +19,7 @@\n binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n+binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))\n \n if is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "issue": "Pyside2 looking different from when running as script.\nHi. Im using latest development version.\r\n\r\nAs you can see when i run my test script:\r\n```\r\nfrom PySide2 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n self.tab = QtWidgets.QWidget()\r\n self.tab.setObjectName(\"tab\")\r\n self.pushButton = QtWidgets.QPushButton(self.tab)\r\n self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34))\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.tableWidget = QtWidgets.QTableWidget(self.tab)\r\n self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192))\r\n self.tableWidget.setObjectName(\"tableWidget\")\r\n self.tableWidget.setColumnCount(3)\r\n self.tableWidget.setRowCount(0)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(0, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(1, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(2, item)\r\n self.tabWidget.addTab(self.tab, \"\")\r\n self.tab_2 = QtWidgets.QWidget()\r\n self.tab_2.setObjectName(\"tab_2\")\r\n self.tabWidget.addTab(self.tab_2, \"\")\r\n self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n self.pushButton.setText(_translate(\"MainWindow\", \"PushButton\"))\r\n item = self.tableWidget.horizontalHeaderItem(0)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(1)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(2)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"Tab 1\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), 
_translate(\"MainWindow\", \"Tab 2\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n```\r\n\r\nfrom console:\r\n![pyside2](https://user-images.githubusercontent.com/28787979/37112961-3d2cc8c8-2244-11e8-95f7-610edfc777a9.png)\r\nand as the binary pyinstaller creates:\r\n![pyside2withpyinstaller](https://user-images.githubusercontent.com/28787979/37112962-3d4e5b00-2244-11e8-95d4-54ce0661d262.png)\r\n\r\nIt looks very different(ignore the blue line, that's something that happened recently on this comp for some reason but the problem existed previous to this). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can i fix this? Ideally it would be possible to bundle my theme \"breeze dark\" with the application so the user has the ability to use that.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import qt_plugins_binaries\nfrom PyInstaller.compat import is_linux\n\nhiddenimports = ['PySide2.QtCore']\n\nbinaries = []\nbinaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n\nif is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "path": "PyInstaller/hooks/hook-PySide2.QtGui.py"}]}
1,754
170
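The one-line fix bundles Qt's `styles` plugin directory, which is what lets a frozen PySide2 app render with the native platform style instead of falling back to the built-in default. One way to check from inside the packaged app (illustrative only, not part of the patch):

```python
from PySide2.QtWidgets import QApplication, QStyleFactory

app = QApplication([])
# With the 'styles' plugins bundled, a native style (e.g. a KDE/Breeze
# style on Linux) should appear here alongside the built-in ones.
print(QStyleFactory.keys())
```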
gh_patches_debug_5669
rasdani/github-patches
git_diff
lutris__lutris-3705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Store banners in .cache They have no place to be in .local/share Store banners in .cache They have no place to be in .local/share </issue> <code> [start of lutris/settings.py] 1 """Internal settings.""" 2 import os 3 import sys 4 from gettext import gettext as _ 5 6 from gi.repository import GLib 7 8 from lutris import __version__ 9 from lutris.util.settings import SettingsIO 10 11 PROJECT = "Lutris" 12 VERSION = __version__ 13 COPYRIGHT = _("(c) 2010-2021 Lutris Team") 14 AUTHORS = [_("The Lutris team")] 15 16 # Paths 17 CONFIG_DIR = os.path.join(GLib.get_user_config_dir(), "lutris") 18 CONFIG_FILE = os.path.join(CONFIG_DIR, "lutris.conf") 19 DATA_DIR = os.path.join(GLib.get_user_data_dir(), "lutris") 20 RUNNER_DIR = os.path.join(DATA_DIR, "runners") 21 RUNTIME_DIR = os.path.join(DATA_DIR, "runtime") 22 CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris") 23 GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games") 24 25 TMP_PATH = os.path.join(CACHE_DIR, "tmp") 26 BANNER_PATH = os.path.join(DATA_DIR, "banners") 27 COVERART_PATH = os.path.join(DATA_DIR, "coverart") 28 ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps") 29 30 sio = SettingsIO(CONFIG_FILE) 31 if "nosetests" in sys.argv[0] or "pytest" in sys.argv[0]: 32 PGA_DB = "/tmp/pga.db" 33 else: 34 PGA_DB = sio.read_setting("pga_path") or os.path.join(DATA_DIR, "pga.db") 35 36 SITE_URL = sio.read_setting("website") or "https://lutris.net" 37 38 DRIVER_HOWTO_URL = "https://github.com/lutris/docs/blob/master/InstallingDrivers.md" 39 INSTALLER_URL = SITE_URL + "/api/installers/%s" 40 # XXX change this, should query on the installer, not the game. 41 INSTALLER_REVISION_URL = SITE_URL + "/api/installers/games/%s/revisions/%s" 42 GAME_URL = SITE_URL + "/games/%s/" 43 RUNTIME_URL = SITE_URL + "/api/runtimes" 44 45 STEAM_API_KEY = sio.read_setting("steam_api_key") or "34C9698CEB394AB4401D65927C6B3752" 46 DISCORD_CLIENT_ID = sio.read_setting("discord_client_id") or "618290412402114570" 47 48 49 read_setting = sio.read_setting 50 write_setting = sio.write_setting 51 [end of lutris/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/settings.py b/lutris/settings.py --- a/lutris/settings.py +++ b/lutris/settings.py @@ -23,7 +23,7 @@ GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games") TMP_PATH = os.path.join(CACHE_DIR, "tmp") -BANNER_PATH = os.path.join(DATA_DIR, "banners") +BANNER_PATH = os.path.join(CACHE_DIR, "banners") COVERART_PATH = os.path.join(DATA_DIR, "coverart") ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
{"golden_diff": "diff --git a/lutris/settings.py b/lutris/settings.py\n--- a/lutris/settings.py\n+++ b/lutris/settings.py\n@@ -23,7 +23,7 @@\n GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n \n TMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\n-BANNER_PATH = os.path.join(DATA_DIR, \"banners\")\n+BANNER_PATH = os.path.join(CACHE_DIR, \"banners\")\n COVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\n ICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n", "issue": "Store banners in .cache\nThey have no place to be in .local/share\nStore banners in .cache\nThey have no place to be in .local/share\n", "before_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nimport sys\nfrom gettext import gettext as _\n\nfrom gi.repository import GLib\n\nfrom lutris import __version__\nfrom lutris.util.settings import SettingsIO\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = _(\"(c) 2010-2021 Lutris Team\")\nAUTHORS = [_(\"The Lutris team\")]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(DATA_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nif \"nosetests\" in sys.argv[0] or \"pytest\" in sys.argv[0]:\n PGA_DB = \"/tmp/pga.db\"\nelse:\n PGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\n\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nDRIVER_HOWTO_URL = \"https://github.com/lutris/docs/blob/master/InstallingDrivers.md\"\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nRUNTIME_URL = SITE_URL + \"/api/runtimes\"\n\nSTEAM_API_KEY = sio.read_setting(\"steam_api_key\") or \"34C9698CEB394AB4401D65927C6B3752\"\nDISCORD_CLIENT_ID = sio.read_setting(\"discord_client_id\") or \"618290412402114570\"\n\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}]}
1,212
147
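The whole fix is the one-line move of `BANNER_PATH` from the data directory to the cache directory; with the same GLib calls used in `settings.py`, the resulting path can be previewed directly:

```python
import os
from gi.repository import GLib

CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
BANNER_PATH = os.path.join(CACHE_DIR, "banners")
print(BANNER_PATH)  # typically ~/.cache/lutris/banners after the patch
```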
gh_patches_debug_804
rasdani/github-patches
git_diff
scikit-hep__awkward-1830
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `ak.fill_none(axis=None)` does nothing ### Version of Awkward Array main ### Description and code to reproduce The `apply` function for this case does not return (or recurse) </issue> <code> [start of src/awkward/operations/ak_fill_none.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 import numbers 4 5 import awkward as ak 6 7 np = ak.nplikes.NumpyMetadata.instance() 8 9 10 def fill_none(array, value, axis=-1, highlevel=True, behavior=None): 11 """ 12 Args: 13 array: Data in which to replace None with a given value. 14 value: Data with which to replace None. 15 axis (None or int): If None, replace all None values in the array 16 with the given value; if an int, The dimension at which this 17 operation is applied. The outermost dimension is `0`, followed 18 by `1`, etc., and negative values count backward from the 19 innermost: `-1` is the innermost dimension, `-2` is the next 20 level up, etc. 21 highlevel (bool): If True, return an #ak.Array; otherwise, return 22 a low-level #ak.contents.Content subclass. 23 behavior (None or dict): Custom #ak.behavior for the output array, if 24 high-level. 25 26 Replaces missing values (None) with a given `value`. 27 28 For example, in the following `array`, 29 30 ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]]) 31 32 The None values could be replaced with `0` by 33 34 >>> ak.fill_none(array, 0) 35 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'> 36 37 The replacement value doesn't strictly need the same type as the 38 surrounding data. For example, the None values could also be replaced 39 by a string. 40 41 >>> ak.fill_none(array, "hi") 42 <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'> 43 44 The list content now has a union type: 45 46 >>> ak.type(ak.fill_none(array, "hi")) 47 3 * var * union[float64, string] 48 49 The values could be floating-point numbers or strings. 
50 """ 51 with ak._errors.OperationErrorContext( 52 "ak.fill_none", 53 dict( 54 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior 55 ), 56 ): 57 return _impl(array, value, axis, highlevel, behavior) 58 59 60 def _impl(array, value, axis, highlevel, behavior): 61 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False) 62 nplike = ak.nplikes.nplike_of(arraylayout) 63 64 # Convert value type to appropriate layout 65 if ( 66 isinstance(value, np.ndarray) 67 and issubclass(value.dtype.type, (np.bool_, np.number)) 68 and len(value.shape) != 0 69 ): 70 valuelayout = ak.operations.to_layout( 71 nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False 72 ) 73 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or ( 74 isinstance(value, np.ndarray) 75 and issubclass(value.dtype.type, (np.bool_, np.number)) 76 ): 77 valuelayout = ak.operations.to_layout( 78 nplike.asarray(value), allow_record=False, allow_other=False 79 ) 80 elif ( 81 ak._util.is_sized_iterable(value) 82 and not (isinstance(value, (str, bytes))) 83 or isinstance(value, (ak.highlevel.Record, ak.record.Record)) 84 ): 85 valuelayout = ak.operations.to_layout( 86 value, allow_record=True, allow_other=False 87 ) 88 if isinstance(valuelayout, ak.record.Record): 89 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1] 90 elif len(valuelayout) == 0: 91 offsets = ak.index.Index64( 92 nplike.array([0, 0], dtype=np.int64), nplike=nplike 93 ) 94 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout) 95 else: 96 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1) 97 else: 98 valuelayout = ak.operations.to_layout( 99 [value], allow_record=False, allow_other=False 100 ) 101 102 def maybe_fillna(layout): 103 if layout.is_OptionType: 104 return layout.fill_none(valuelayout) 105 else: 106 return layout 107 108 if axis is None: 109 110 def action(layout, depth, depth_context, **kwargs): 111 layout = maybe_fillna(layout) 112 113 else: 114 115 def action(layout, depth, depth_context, **kwargs): 116 posaxis = layout.axis_wrap_if_negative(depth_context["posaxis"]) 117 depth_context["posaxis"] = posaxis 118 if posaxis + 1 < depth: 119 return layout 120 elif posaxis + 1 == depth: 121 return maybe_fillna(layout) 122 123 depth_context = {"posaxis": axis} 124 out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context) 125 126 return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel) 127 [end of src/awkward/operations/ak_fill_none.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py --- a/src/awkward/operations/ak_fill_none.py +++ b/src/awkward/operations/ak_fill_none.py @@ -107,8 +107,8 @@ if axis is None: - def action(layout, depth, depth_context, **kwargs): - layout = maybe_fillna(layout) + def action(layout, continuation, **kwargs): + return maybe_fillna(continuation()) else:
{"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -107,8 +107,8 @@\n \n if axis is None:\n \n- def action(layout, depth, depth_context, **kwargs):\n- layout = maybe_fillna(layout)\n+ def action(layout, continuation, **kwargs):\n+ return maybe_fillna(continuation())\n \n else:\n", "issue": "`ak.fill_none(axis=None)` does nothing\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThe `apply` function for this case does not return (or recurse)\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... 
['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, depth, depth_context, **kwargs):\n layout = maybe_fillna(layout)\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]}
2,025
127
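Before the patch, the `axis=None` branch built an action that never returned anything, so no filling happened; routing through `continuation()` makes the traversal actually rewrite option-type nodes. The docstring's own example can serve as a quick behavioral check:

```python
import awkward as ak

arr = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
# With the fix, axis=None fills None values at every nesting depth.
print(ak.to_list(ak.fill_none(arr, 0, axis=None)))
# expected: [[1.1, 0, 2.2], [], [0, 3.3, 4.4]]
```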
gh_patches_debug_36171
rasdani/github-patches
git_diff
pandas-dev__pandas-28230
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ASV Benchmark for read_excel Right now we don't benchmark performance of read_excel across the various engines (xlrd, openpyxl and odfpy) so would gladly accept contributions to improve benchmark coverage on those </issue> <code> [start of asv_bench/benchmarks/io/excel.py] 1 from io import BytesIO 2 3 import numpy as np 4 5 from pandas import DataFrame, ExcelWriter, date_range, read_excel 6 import pandas.util.testing as tm 7 8 9 class Excel: 10 11 params = ["openpyxl", "xlsxwriter", "xlwt"] 12 param_names = ["engine"] 13 14 def setup(self, engine): 15 N = 2000 16 C = 5 17 self.df = DataFrame( 18 np.random.randn(N, C), 19 columns=["float{}".format(i) for i in range(C)], 20 index=date_range("20000101", periods=N, freq="H"), 21 ) 22 self.df["object"] = tm.makeStringIndex(N) 23 self.bio_read = BytesIO() 24 self.writer_read = ExcelWriter(self.bio_read, engine=engine) 25 self.df.to_excel(self.writer_read, sheet_name="Sheet1") 26 self.writer_read.save() 27 self.bio_read.seek(0) 28 29 def time_read_excel(self, engine): 30 read_excel(self.bio_read) 31 32 def time_write_excel(self, engine): 33 bio_write = BytesIO() 34 bio_write.seek(0) 35 writer_write = ExcelWriter(bio_write, engine=engine) 36 self.df.to_excel(writer_write, sheet_name="Sheet1") 37 writer_write.save() 38 39 40 from ..pandas_vb_common import setup # noqa: F401 isort:skip 41 [end of asv_bench/benchmarks/io/excel.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py --- a/asv_bench/benchmarks/io/excel.py +++ b/asv_bench/benchmarks/io/excel.py @@ -1,40 +1,72 @@ from io import BytesIO import numpy as np +from odf.opendocument import OpenDocumentSpreadsheet +from odf.table import Table, TableCell, TableRow +from odf.text import P from pandas import DataFrame, ExcelWriter, date_range, read_excel import pandas.util.testing as tm -class Excel: +def _generate_dataframe(): + N = 2000 + C = 5 + df = DataFrame( + np.random.randn(N, C), + columns=["float{}".format(i) for i in range(C)], + index=date_range("20000101", periods=N, freq="H"), + ) + df["object"] = tm.makeStringIndex(N) + return df + + +class WriteExcel: params = ["openpyxl", "xlsxwriter", "xlwt"] param_names = ["engine"] def setup(self, engine): - N = 2000 - C = 5 - self.df = DataFrame( - np.random.randn(N, C), - columns=["float{}".format(i) for i in range(C)], - index=date_range("20000101", periods=N, freq="H"), - ) - self.df["object"] = tm.makeStringIndex(N) - self.bio_read = BytesIO() - self.writer_read = ExcelWriter(self.bio_read, engine=engine) - self.df.to_excel(self.writer_read, sheet_name="Sheet1") - self.writer_read.save() - self.bio_read.seek(0) - - def time_read_excel(self, engine): - read_excel(self.bio_read) + self.df = _generate_dataframe() def time_write_excel(self, engine): - bio_write = BytesIO() - bio_write.seek(0) - writer_write = ExcelWriter(bio_write, engine=engine) - self.df.to_excel(writer_write, sheet_name="Sheet1") - writer_write.save() + bio = BytesIO() + bio.seek(0) + writer = ExcelWriter(bio, engine=engine) + self.df.to_excel(writer, sheet_name="Sheet1") + writer.save() + + +class ReadExcel: + + params = ["xlrd", "openpyxl", "odf"] + param_names = ["engine"] + fname_excel = "spreadsheet.xlsx" + fname_odf = "spreadsheet.ods" + + def _create_odf(self): + doc = OpenDocumentSpreadsheet() + table = Table(name="Table1") + for row in self.df.values: + tr = TableRow() + for val in row: + tc = TableCell(valuetype="string") + tc.addElement(P(text=val)) + tr.addElement(tc) + table.addElement(tr) + + doc.spreadsheet.addElement(table) + doc.save(self.fname_odf) + + def setup_cache(self): + self.df = _generate_dataframe() + + self.df.to_excel(self.fname_excel, sheet_name="Sheet1") + self._create_odf() + + def time_read_excel(self, engine): + fname = self.fname_odf if engine == "odf" else self.fname_excel + read_excel(fname, engine=engine) from ..pandas_vb_common import setup # noqa: F401 isort:skip
{"golden_diff": "diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py\n--- a/asv_bench/benchmarks/io/excel.py\n+++ b/asv_bench/benchmarks/io/excel.py\n@@ -1,40 +1,72 @@\n from io import BytesIO\n \n import numpy as np\n+from odf.opendocument import OpenDocumentSpreadsheet\n+from odf.table import Table, TableCell, TableRow\n+from odf.text import P\n \n from pandas import DataFrame, ExcelWriter, date_range, read_excel\n import pandas.util.testing as tm\n \n \n-class Excel:\n+def _generate_dataframe():\n+ N = 2000\n+ C = 5\n+ df = DataFrame(\n+ np.random.randn(N, C),\n+ columns=[\"float{}\".format(i) for i in range(C)],\n+ index=date_range(\"20000101\", periods=N, freq=\"H\"),\n+ )\n+ df[\"object\"] = tm.makeStringIndex(N)\n+ return df\n+\n+\n+class WriteExcel:\n \n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n \n def setup(self, engine):\n- N = 2000\n- C = 5\n- self.df = DataFrame(\n- np.random.randn(N, C),\n- columns=[\"float{}\".format(i) for i in range(C)],\n- index=date_range(\"20000101\", periods=N, freq=\"H\"),\n- )\n- self.df[\"object\"] = tm.makeStringIndex(N)\n- self.bio_read = BytesIO()\n- self.writer_read = ExcelWriter(self.bio_read, engine=engine)\n- self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n- self.writer_read.save()\n- self.bio_read.seek(0)\n-\n- def time_read_excel(self, engine):\n- read_excel(self.bio_read)\n+ self.df = _generate_dataframe()\n \n def time_write_excel(self, engine):\n- bio_write = BytesIO()\n- bio_write.seek(0)\n- writer_write = ExcelWriter(bio_write, engine=engine)\n- self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n- writer_write.save()\n+ bio = BytesIO()\n+ bio.seek(0)\n+ writer = ExcelWriter(bio, engine=engine)\n+ self.df.to_excel(writer, sheet_name=\"Sheet1\")\n+ writer.save()\n+\n+\n+class ReadExcel:\n+\n+ params = [\"xlrd\", \"openpyxl\", \"odf\"]\n+ param_names = [\"engine\"]\n+ fname_excel = \"spreadsheet.xlsx\"\n+ fname_odf = \"spreadsheet.ods\"\n+\n+ def _create_odf(self):\n+ doc = OpenDocumentSpreadsheet()\n+ table = Table(name=\"Table1\")\n+ for row in self.df.values:\n+ tr = TableRow()\n+ for val in row:\n+ tc = TableCell(valuetype=\"string\")\n+ tc.addElement(P(text=val))\n+ tr.addElement(tc)\n+ table.addElement(tr)\n+\n+ doc.spreadsheet.addElement(table)\n+ doc.save(self.fname_odf)\n+\n+ def setup_cache(self):\n+ self.df = _generate_dataframe()\n+\n+ self.df.to_excel(self.fname_excel, sheet_name=\"Sheet1\")\n+ self._create_odf()\n+\n+ def time_read_excel(self, engine):\n+ fname = self.fname_odf if engine == \"odf\" else self.fname_excel\n+ read_excel(fname, engine=engine)\n \n \n from ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "issue": "ASV Benchmark for read_excel\nRight now we don't benchmark performance of read_excel across the various engines (xlrd, openpyxl and odfpy) so would gladly accept contributions to improve benchmark coverage on those\n", "before_files": [{"content": "from io import BytesIO\n\nimport numpy as np\n\nfrom pandas import DataFrame, ExcelWriter, date_range, read_excel\nimport pandas.util.testing as tm\n\n\nclass Excel:\n\n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n\n def setup(self, engine):\n N = 2000\n C = 5\n self.df = DataFrame(\n np.random.randn(N, C),\n columns=[\"float{}\".format(i) for i in range(C)],\n index=date_range(\"20000101\", periods=N, freq=\"H\"),\n )\n self.df[\"object\"] = tm.makeStringIndex(N)\n self.bio_read = BytesIO()\n self.writer_read = 
ExcelWriter(self.bio_read, engine=engine)\n self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n self.writer_read.save()\n self.bio_read.seek(0)\n\n def time_read_excel(self, engine):\n read_excel(self.bio_read)\n\n def time_write_excel(self, engine):\n bio_write = BytesIO()\n bio_write.seek(0)\n writer_write = ExcelWriter(bio_write, engine=engine)\n self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n writer_write.save()\n\n\nfrom ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/io/excel.py"}]}
966
813
gh_patches_debug_27454
rasdani/github-patches
git_diff
bridgecrewio__checkov-4530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded **Describe the issue** The check EC2Credentials.py which checks the UserData of a CloudFormation template against secrets does not work if the userdata are already encoded in base64. The check is not returning the correct result and the processing time is very slow. **Examples** ```yaml AWSTemplateFormatVersion: "2010-09-09" Resources: Resource0: Type: AWS::EC2::Instance Properties: ImageId: ami-04169656fea786776 UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s ``` **Version (please complete the following information):** - Checkov Version [e.g. 22] - >2.0.0 **Additional context** Fix implemented here by trying to decode the base64 string. https://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc </issue> <code> [start of checkov/cloudformation/checks/resource/aws/EC2Credentials.py] 1 from typing import List 2 3 from checkov.common.models.enums import CheckResult, CheckCategories 4 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck 5 from checkov.common.util.secrets import get_secrets_from_string 6 7 8 class EC2Credentials(BaseResourceCheck): 9 def __init__(self): 10 name = "Ensure no hard-coded secrets exist in EC2 user data" 11 id = "CKV_AWS_46" 12 supported_resources = ['AWS::EC2::Instance'] 13 categories = [CheckCategories.SECRETS] 14 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 15 16 def scan_resource_conf(self, conf): 17 if 'Properties' in conf.keys(): 18 if 'UserData' in conf['Properties'].keys(): 19 user_data = conf['Properties']['UserData'] 20 # Cast to string as user data object can look slightly different depending 21 # on Yaml or JSON CF Templates and how the B64 conversion is done. 22 user_data_str = str(user_data) 23 if isinstance(user_data_str, str): 24 secrets = get_secrets_from_string(str(user_data_str)) 25 if secrets: 26 for idx, secret in enumerate(secrets): 27 conf[f'{self.id}_secret_{idx}'] = secret 28 return CheckResult.FAILED 29 return CheckResult.PASSED 30 31 def get_evaluated_keys(self) -> List[str]: 32 return ["Properties/UserData"] 33 34 35 check = EC2Credentials() 36 [end of checkov/cloudformation/checks/resource/aws/EC2Credentials.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py --- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py +++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py @@ -1,4 +1,5 @@ from typing import List +from base64 import b64decode from checkov.common.models.enums import CheckResult, CheckCategories from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck @@ -17,9 +18,15 @@ if 'Properties' in conf.keys(): if 'UserData' in conf['Properties'].keys(): user_data = conf['Properties']['UserData'] - # Cast to string as user data object can look slightly different depending - # on Yaml or JSON CF Templates and how the B64 conversion is done. - user_data_str = str(user_data) + # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes) + # the get_secrets_from_string function. + try: + user_data_str = b64decode(user_data).decode() + except Exception: + # Cast to string as user data object can look slightly different depending + # on Yaml or JSON CF Templates and how the B64 conversion is done. + user_data_str = str(user_data) + if isinstance(user_data_str, str): secrets = get_secrets_from_string(str(user_data_str)) if secrets:
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n--- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n+++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n@@ -1,4 +1,5 @@\n from typing import List\n+from base64 import b64decode\n \n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n@@ -17,9 +18,15 @@\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n- # Cast to string as user data object can look slightly different depending\n- # on Yaml or JSON CF Templates and how the B64 conversion is done.\n- user_data_str = str(user_data)\n+ # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)\n+ # the get_secrets_from_string function.\n+ try:\n+ user_data_str = b64decode(user_data).decode()\n+ except Exception:\n+ # Cast to string as user data object can look slightly different depending\n+ # on Yaml or JSON CF Templates and how the B64 conversion is done.\n+ user_data_str = str(user_data)\n+\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n", "issue": "CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded\n**Describe the issue**\r\nThe check EC2Credentials.py which checks the UserData of a CloudFormation template against secrets does not work if the userdata are already encoded in base64. \r\nThe check is not returning the correct result and the processing time is very slow.\r\n\r\n**Examples**\r\n```yaml\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\nResources:\r\n Resource0:\r\n Type: AWS::EC2::Instance\r\n Properties:\r\n ImageId: ami-04169656fea786776\r\n UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version [e.g. 
22]\r\n - >2.0.0\r\n\r\n**Additional context**\r\nFix implemented here by trying to decode the base64 string.\r\nhttps://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc\r\n\n", "before_files": [{"content": "from typing import List\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.secrets import get_secrets_from_string\n\n\nclass EC2Credentials(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no hard-coded secrets exist in EC2 user data\"\n id = \"CKV_AWS_46\"\n supported_resources = ['AWS::EC2::Instance']\n categories = [CheckCategories.SECRETS]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n # Cast to string as user data object can look slightly different depending\n # on Yaml or JSON CF Templates and how the B64 conversion is done.\n user_data_str = str(user_data)\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n for idx, secret in enumerate(secrets):\n conf[f'{self.id}_secret_{idx}'] = secret\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"Properties/UserData\"]\n\n\ncheck = EC2Credentials()\n", "path": "checkov/cloudformation/checks/resource/aws/EC2Credentials.py"}]}
1527
343
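The checkov record's fix is a decode-first fallback: UserData is base64-decoded before the secret scan, since a pre-encoded payload both hides the exported AWS keys and makes the regex pass crawl. The helper below is a hypothetical standalone extraction of that pattern; the golden diff inlines the same try/except inside `scan_resource_conf`:

```python
from base64 import b64decode


def normalize_user_data(user_data):
    """Return UserData as plain text, decoding it first if base64-encoded."""
    try:
        return b64decode(user_data).decode()
    except Exception:
        # Not valid base64 (or not decodable as text): fall back to the
        # raw stringified value, as the original check did.
        return str(user_data)
```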
gh_patches_debug_17541
rasdani/github-patches
git_diff
liqd__a4-opin-284
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Filter draft projects from all listings Projects that are in draft mode should be removed from most listings: - [x] latest projects for all users - [x] other projects for all users - [x] organisation page for non-initiators </issue> <code> [start of euth/organisations/views.py] 1 from django.views import generic 2 3 from . import models 4 5 6 class OrganisationDetailView(generic.DetailView): 7 model = models.Organisation 8 9 10 class OrganisationListView(generic.ListView): 11 model = models.Organisation 12 paginate_by = 10 13 [end of euth/organisations/views.py] [start of euth/projects/models.py] 1 from django.conf import settings 2 from django.db import models 3 from django.utils import functional, timezone 4 5 from contrib.transforms import html_transforms 6 from euth.contrib import base_models, validators 7 from euth.organisations import models as org_models 8 9 10 class ProjectManager(models.Manager): 11 12 def get_by_natural_key(self, name): 13 return self.get(name=name) 14 15 def featured(self): 16 return self.filter(is_draft=False).order_by('-created')[:8] 17 18 19 class Project(base_models.TimeStampedModel): 20 slug = models.SlugField(max_length=512, unique=True) 21 name = models.CharField(max_length=512) 22 organisation = models.ForeignKey( 23 org_models.Organisation, on_delete=models.CASCADE) 24 description = models.CharField(max_length=1024) 25 information = models.TextField() 26 is_public = models.BooleanField(default=True) 27 is_draft = models.BooleanField(default=True) 28 image = models.ImageField( 29 upload_to='projects/backgrounds', 30 blank=True, 31 validators=[validators.validate_hero_image]) 32 participants = models.ManyToManyField( 33 settings.AUTH_USER_MODEL, 34 related_name='project_participant', 35 blank=True, 36 ) 37 moderators = models.ManyToManyField( 38 settings.AUTH_USER_MODEL, 39 related_name='project_moderator' 40 ) 41 42 objects = ProjectManager() 43 44 def __str__(self): 45 return self.name 46 47 def save(self, *args, **kwargs): 48 self.information = html_transforms.clean_html_field( 49 self.information) 50 super(Project, self).save(*args, **kwargs) 51 52 def get_absolute_url(self): 53 from django.core.urlresolvers import reverse 54 return reverse('project-detail', args=[str(self.slug)]) 55 56 def has_member(self, user): 57 """ 58 Everybody is member of all public projects and private projects can 59 be joined as moderator or participant. 60 """ 61 return ( 62 (user.is_authenticated() and self.is_public) 63 or (user in self.participants.all()) 64 or (user in self.moderators.all()) 65 ) 66 67 @functional.cached_property 68 def other_projects(self): 69 other_projects = self.organisation.project_set.all().exclude( 70 slug=self.slug) 71 return other_projects 72 73 @functional.cached_property 74 def is_private(self): 75 return not self.is_public 76 77 @functional.cached_property 78 def active_phase(self): 79 from euth.phases import models as phase_models 80 return phase_models.Phase.objects\ 81 .filter(module__project=self)\ 82 .active_phases()\ 83 .first() 84 85 @property 86 def days_left(self): 87 if self.active_phase: 88 today = timezone.now().replace(hour=0, minute=0, second=0) 89 time_delta = self.active_phase.end_date - today 90 return time_delta.days 91 [end of euth/projects/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/organisations/views.py b/euth/organisations/views.py --- a/euth/organisations/views.py +++ b/euth/organisations/views.py @@ -6,6 +6,12 @@ class OrganisationDetailView(generic.DetailView): model = models.Organisation + def visible_projects(self): + if self.request.user in self.object.initiators.all(): + return self.object.project_set.all() + else: + return self.object.project_set.filter(is_draft=False) + class OrganisationListView(generic.ListView): model = models.Organisation diff --git a/euth/projects/models.py b/euth/projects/models.py --- a/euth/projects/models.py +++ b/euth/projects/models.py @@ -66,8 +66,8 @@ @functional.cached_property def other_projects(self): - other_projects = self.organisation.project_set.all().exclude( - slug=self.slug) + other_projects = self.organisation.project_set\ + .filter(is_draft=False).exclude(slug=self.slug) return other_projects @functional.cached_property
{"golden_diff": "diff --git a/euth/organisations/views.py b/euth/organisations/views.py\n--- a/euth/organisations/views.py\n+++ b/euth/organisations/views.py\n@@ -6,6 +6,12 @@\n class OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n \n+ def visible_projects(self):\n+ if self.request.user in self.object.initiators.all():\n+ return self.object.project_set.all()\n+ else:\n+ return self.object.project_set.filter(is_draft=False)\n+\n \n class OrganisationListView(generic.ListView):\n model = models.Organisation\ndiff --git a/euth/projects/models.py b/euth/projects/models.py\n--- a/euth/projects/models.py\n+++ b/euth/projects/models.py\n@@ -66,8 +66,8 @@\n \n @functional.cached_property\n def other_projects(self):\n- other_projects = self.organisation.project_set.all().exclude(\n- slug=self.slug)\n+ other_projects = self.organisation.project_set\\\n+ .filter(is_draft=False).exclude(slug=self.slug)\n return other_projects\n \n @functional.cached_property\n", "issue": "Filter draft projects from all listings\nProjects that are in draft mode should be removed from most listings:\n- [x] latest projects for all users\n- [x] other projects for all users\n- [x] organisation page for non-initiators\n\n", "before_files": [{"content": "from django.views import generic\n\nfrom . import models\n\n\nclass OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n\n\nclass OrganisationListView(generic.ListView):\n model = models.Organisation\n paginate_by = 10\n", "path": "euth/organisations/views.py"}, {"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils import functional, timezone\n\nfrom contrib.transforms import html_transforms\nfrom euth.contrib import base_models, validators\nfrom euth.organisations import models as org_models\n\n\nclass ProjectManager(models.Manager):\n\n def get_by_natural_key(self, name):\n return self.get(name=name)\n\n def featured(self):\n return self.filter(is_draft=False).order_by('-created')[:8]\n\n\nclass Project(base_models.TimeStampedModel):\n slug = models.SlugField(max_length=512, unique=True)\n name = models.CharField(max_length=512)\n organisation = models.ForeignKey(\n org_models.Organisation, on_delete=models.CASCADE)\n description = models.CharField(max_length=1024)\n information = models.TextField()\n is_public = models.BooleanField(default=True)\n is_draft = models.BooleanField(default=True)\n image = models.ImageField(\n upload_to='projects/backgrounds',\n blank=True,\n validators=[validators.validate_hero_image])\n participants = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_participant',\n blank=True,\n )\n moderators = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_moderator'\n )\n\n objects = ProjectManager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.information = html_transforms.clean_html_field(\n self.information)\n super(Project, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('project-detail', args=[str(self.slug)])\n\n def has_member(self, user):\n \"\"\"\n Everybody is member of all public projects and private projects can\n be joined as moderator or participant.\n \"\"\"\n return (\n (user.is_authenticated() and self.is_public)\n or (user in self.participants.all())\n or (user in self.moderators.all())\n )\n\n @functional.cached_property\n def other_projects(self):\n other_projects = 
self.organisation.project_set.all().exclude(\n slug=self.slug)\n return other_projects\n\n @functional.cached_property\n def is_private(self):\n return not self.is_public\n\n @functional.cached_property\n def active_phase(self):\n from euth.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project=self)\\\n .active_phases()\\\n .first()\n\n @property\n def days_left(self):\n if self.active_phase:\n today = timezone.now().replace(hour=0, minute=0, second=0)\n time_delta = self.active_phase.end_date - today\n return time_delta.days\n", "path": "euth/projects/models.py"}]}
1443
244
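Both hunks of the euth golden diff encode one rule: draft projects are visible only to the organisation's initiators. A hypothetical standalone version of that queryset logic (the function name is mine, not the project's):

```python
def visible_projects(organisation, user):
    # Initiators see everything, including drafts; everyone else only
    # sees published projects - the rule both hunks of the diff apply.
    queryset = organisation.project_set.all()
    if user not in organisation.initiators.all():
        queryset = queryset.filter(is_draft=False)
    return queryset
```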
gh_patches_debug_28597
rasdani/github-patches
git_diff
openstates__openstates-scrapers-1354
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NH: legislators scrape is failing State: NH Legislator scraper needs some attention </issue> <code> [start of openstates/nh/legislators.py] 1 import re 2 3 from billy.scrape.legislators import LegislatorScraper, Legislator 4 from openstates.utils import LXMLMixin 5 6 7 class NHLegislatorScraper(LegislatorScraper, LXMLMixin): 8 jurisdiction = 'nh' 9 latest_only = True 10 members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt' 11 12 chamber_map = {'H': 'lower', 'S': 'upper'} 13 party_map = { 14 'D': 'Democratic', 15 'R': 'Republican', 16 'I': 'Independent', 17 'L': 'Libertarian', 18 } 19 20 def _get_photo(self, url, chamber): 21 """Attempts to find a portrait in the given legislator profile.""" 22 doc = self.lxmlize(url) 23 24 if chamber == 'upper': 25 src = doc.xpath('//div[@id="page_content"]//img[contains(@src, ' 26 '"images/senators") or contains(@src, "Senator")]/@src') 27 elif chamber == 'lower': 28 src = doc.xpath('//img[contains(@src, "images/memberpics")]/@src') 29 30 if src and 'nophoto' not in src[0]: 31 photo_url = src[0] 32 else: 33 photo_url = '' 34 35 return photo_url 36 37 def _parse_legislator(self, row, chamber, term): 38 # Capture legislator vitals. 39 first_name = row['FirstName'] 40 middle_name = row['MiddleName'] 41 last_name = row['lastname'] 42 full_name = '{} {} {}'.format(first_name, middle_name, last_name) 43 full_name = re.sub(r'[\s]{2,}', ' ', full_name) 44 45 district = '{} {}'.format(row['county'], int(row['District'])).strip() 46 party = self.party_map[row['party']] 47 email = row['EMailAddress1'] 48 49 legislator = Legislator(term, chamber, district, full_name, 50 first_name=first_name, last_name=last_name, 51 middle_name=middle_name, party=party, 52 email=email) 53 54 # Capture legislator office contact information. 55 district_address = '{}\n{}\n{}, {} {}'.format(row['street'], 56 row['address2'], row['city'], row['state'], row['zipcode']).strip() 57 58 legislator.add_office('district', 'Home Address', 59 address=district_address) 60 61 # Retrieve legislator portrait. 62 #profile_url = None 63 #if chamber == 'upper': 64 # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District']) 65 #elif chamber == 'lower': 66 # profile_url = 'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no']) 67 68 #if profile_url: 69 # legislator['photo_url'] = self._get_photo(profile_url, chamber) 70 # legislator.add_source(profile_url) 71 72 return legislator 73 74 def _parse_members_txt(self): 75 lines = self.get(self.members_url).text.splitlines() 76 77 header = lines[0].split('\t') 78 79 for line in lines[1:]: 80 yield dict(zip(header, line.split('\t'))) 81 82 def scrape(self, chamber, term): 83 for row in self._parse_members_txt(): 84 if self.chamber_map[row['LegislativeBody']] == chamber: 85 leg = self._parse_legislator(row, chamber, term) 86 leg.add_source(self.members_url) 87 self.save_legislator(leg) 88 [end of openstates/nh/legislators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py --- a/openstates/nh/legislators.py +++ b/openstates/nh/legislators.py @@ -38,13 +38,13 @@ # Capture legislator vitals. first_name = row['FirstName'] middle_name = row['MiddleName'] - last_name = row['lastname'] + last_name = row['LastName'] full_name = '{} {} {}'.format(first_name, middle_name, last_name) full_name = re.sub(r'[\s]{2,}', ' ', full_name) - district = '{} {}'.format(row['county'], int(row['District'])).strip() - party = self.party_map[row['party']] - email = row['EMailAddress1'] + district = '{} {}'.format(row['County'], int(row['District'])).strip() + party = self.party_map[row['party'].upper()] + email = row['WorkEmail'] legislator = Legislator(term, chamber, district, full_name, first_name=first_name, last_name=last_name, @@ -52,8 +52,8 @@ email=email) # Capture legislator office contact information. - district_address = '{}\n{}\n{}, {} {}'.format(row['street'], - row['address2'], row['city'], row['state'], row['zipcode']).strip() + district_address = '{}\n{}\n{}, {} {}'.format(row['Address'], + row['address2'], row['city'], row['State'], row['Zipcode']).strip() legislator.add_office('district', 'Home Address', address=district_address)
{"golden_diff": "diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py\n--- a/openstates/nh/legislators.py\n+++ b/openstates/nh/legislators.py\n@@ -38,13 +38,13 @@\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n- last_name = row['lastname']\n+ last_name = row['LastName']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n \n- district = '{} {}'.format(row['county'], int(row['District'])).strip()\n- party = self.party_map[row['party']]\n- email = row['EMailAddress1']\n+ district = '{} {}'.format(row['County'], int(row['District'])).strip()\n+ party = self.party_map[row['party'].upper()]\n+ email = row['WorkEmail']\n \n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n@@ -52,8 +52,8 @@\n email=email)\n \n # Capture legislator office contact information.\n- district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n- row['address2'], row['city'], row['state'], row['zipcode']).strip()\n+ district_address = '{}\\n{}\\n{}, {} {}'.format(row['Address'],\n+ row['address2'], row['city'], row['State'], row['Zipcode']).strip()\n \n legislator.add_office('district', 'Home Address',\n address=district_address)\n", "issue": "NH: legislators scrape is failing\nState: NH\r\n\r\nLegislator scraper needs some attention\n", "before_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\n\nclass NHLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'nh'\n latest_only = True\n members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'\n\n chamber_map = {'H': 'lower', 'S': 'upper'}\n party_map = {\n 'D': 'Democratic',\n 'R': 'Republican',\n 'I': 'Independent',\n 'L': 'Libertarian',\n }\n\n def _get_photo(self, url, chamber):\n \"\"\"Attempts to find a portrait in the given legislator profile.\"\"\"\n doc = self.lxmlize(url)\n\n if chamber == 'upper':\n src = doc.xpath('//div[@id=\"page_content\"]//img[contains(@src, '\n '\"images/senators\") or contains(@src, \"Senator\")]/@src')\n elif chamber == 'lower':\n src = doc.xpath('//img[contains(@src, \"images/memberpics\")]/@src')\n\n if src and 'nophoto' not in src[0]:\n photo_url = src[0]\n else:\n photo_url = ''\n\n return photo_url\n\n def _parse_legislator(self, row, chamber, term):\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n last_name = row['lastname']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n\n district = '{} {}'.format(row['county'], int(row['District'])).strip()\n party = self.party_map[row['party']]\n email = row['EMailAddress1']\n\n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n middle_name=middle_name, party=party,\n email=email)\n\n # Capture legislator office contact information.\n district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n row['address2'], row['city'], row['state'], row['zipcode']).strip()\n\n legislator.add_office('district', 'Home Address',\n address=district_address)\n\n # Retrieve legislator portrait.\n #profile_url = None\n #if chamber == 'upper':\n # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])\n #elif chamber == 'lower':\n # profile_url = 
'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])\n\n #if profile_url:\n # legislator['photo_url'] = self._get_photo(profile_url, chamber)\n # legislator.add_source(profile_url)\n\n return legislator\n\n def _parse_members_txt(self):\n lines = self.get(self.members_url).text.splitlines()\n\n header = lines[0].split('\\t')\n\n for line in lines[1:]:\n yield dict(zip(header, line.split('\\t')))\n\n def scrape(self, chamber, term):\n for row in self._parse_members_txt():\n if self.chamber_map[row['LegislativeBody']] == chamber:\n leg = self._parse_legislator(row, chamber, term)\n leg.add_source(self.members_url)\n self.save_legislator(leg)\n", "path": "openstates/nh/legislators.py"}]}
1515
379
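The openstates fix is a straight key rename: the header row of Members.txt changed upstream (lastname to LastName, county to County, EMailAddress1 to WorkEmail), and every row dict is keyed verbatim from that header. A defensive variant, which is not what the golden diff does, would normalize header casing once at parse time; it survives pure case changes but would still have missed the EMailAddress1-to-WorkEmail rename:

```python
def parse_members(text):
    lines = text.splitlines()
    # Lower-case the header once so row lookups tolerate casing churn.
    header = [column.strip().lower() for column in lines[0].split("\t")]
    for line in lines[1:]:
        yield dict(zip(header, line.split("\t")))
```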
gh_patches_debug_2764
rasdani/github-patches
git_diff
ivy-llc__ivy-18208
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> expand </issue> <code> [start of ivy/functional/frontends/paddle/tensor/manipulation.py] 1 # global 2 import ivy 3 from ivy.functional.frontends.paddle.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 ) 6 from ivy.func_wrapper import ( 7 with_unsupported_dtypes, 8 with_supported_dtypes, 9 ) 10 11 12 @to_ivy_arrays_and_back 13 def reshape(x, shape): 14 return ivy.reshape(x, shape) 15 16 17 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 18 @to_ivy_arrays_and_back 19 def abs(x, name=None): 20 return ivy.abs(x) 21 22 23 absolute = abs 24 25 26 @to_ivy_arrays_and_back 27 def stack(x, axis=0, name=None): 28 return ivy.stack(x, axis=axis) 29 30 31 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle") 32 @to_ivy_arrays_and_back 33 def concat(x, axis, name=None): 34 return ivy.concat(x, axis=axis) 35 36 37 @with_unsupported_dtypes( 38 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")}, 39 "paddle", 40 ) 41 @to_ivy_arrays_and_back 42 def tile(x, repeat_times, name=None): 43 return ivy.tile(x, repeats=repeat_times) 44 45 46 @with_unsupported_dtypes( 47 {"2.5.0 and below": ("int16", "complex64", "complex128")}, 48 "paddle", 49 ) 50 @to_ivy_arrays_and_back 51 def split(x, num_or_sections, axis=0, name=None): 52 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis) 53 54 55 @with_unsupported_dtypes( 56 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")}, 57 "paddle", 58 ) 59 @to_ivy_arrays_and_back 60 def squeeze(x, axis=None, name=None): 61 return ivy.squeeze(x, axis=axis) 62 63 64 @with_supported_dtypes( 65 { 66 "2.5.0 and below": ( 67 "bool", 68 "float16", 69 "float32", 70 "float64", 71 "int32", 72 "int64", 73 "uint8", 74 ) 75 }, 76 "paddle", 77 ) 78 @to_ivy_arrays_and_back 79 def cast(x, dtype): 80 return ivy.astype(x, dtype) 81 [end of ivy/functional/frontends/paddle/tensor/manipulation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py --- a/ivy/functional/frontends/paddle/tensor/manipulation.py +++ b/ivy/functional/frontends/paddle/tensor/manipulation.py @@ -61,6 +61,15 @@ return ivy.squeeze(x, axis=axis) +@with_supported_dtypes( + {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")}, + "paddle", +) +@to_ivy_arrays_and_back +def expand(x, shape, name=None): + return ivy.expand(x, shape) + + @with_supported_dtypes( { "2.5.0 and below": (
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -61,6 +61,15 @@\n return ivy.squeeze(x, axis=axis)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def expand(x, shape, name=None):\n+ return ivy.expand(x, shape)\n+\n+\n @with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n", "issue": "expand\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]}
1258
193
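The ivy golden diff is essentially the one function below: `paddle.expand` mapped onto `ivy.expand` and gated to the dtypes Paddle supports for the op. Reproduced as a self-contained sketch with the imports the target file already uses:

```python
import ivy
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes


@with_supported_dtypes(
    {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")},
    "paddle",
)
@to_ivy_arrays_and_back
def expand(x, shape, name=None):
    # Broadcast x to the requested shape, matching paddle.expand semantics.
    return ivy.expand(x, shape)
```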
gh_patches_debug_27372
rasdani/github-patches
git_diff
open-mmlab__mmdetection-4250
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Different mean values of image normalization for HRNet I find the different configs of HRNet experiments use different mean values for image normalization. For example, [fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value, while [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value. Which one is correct? </issue> <code> [start of configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py] 1 _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 img_norm_cfg = dict( 3 mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 4 train_pipeline = [ 5 dict(type='LoadImageFromFile'), 6 dict(type='LoadAnnotations', with_bbox=True), 7 dict( 8 type='Resize', 9 img_scale=[(1333, 640), (1333, 800)], 10 multiscale_mode='value', 11 keep_ratio=True), 12 dict(type='RandomFlip', flip_ratio=0.5), 13 dict(type='Normalize', **img_norm_cfg), 14 dict(type='Pad', size_divisor=32), 15 dict(type='DefaultFormatBundle'), 16 dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 17 ] 18 test_pipeline = [ 19 dict(type='LoadImageFromFile'), 20 dict( 21 type='MultiScaleFlipAug', 22 img_scale=(1333, 800), 23 flip=False, 24 transforms=[ 25 dict(type='Resize', keep_ratio=True), 26 dict(type='RandomFlip'), 27 dict(type='Normalize', **img_norm_cfg), 28 dict(type='Pad', size_divisor=32), 29 dict(type='ImageToTensor', keys=['img']), 30 dict(type='Collect', keys=['img']), 31 ]) 32 ] 33 data = dict( 34 train=dict(pipeline=train_pipeline), 35 val=dict(pipeline=test_pipeline), 36 test=dict(pipeline=test_pipeline)) 37 # learning policy 38 lr_config = dict(step=[16, 22]) 39 total_epochs = 24 40 [end of configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py] [start of configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py] 1 _base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 model = dict( 3 pretrained='open-mmlab://msra/hrnetv2_w32', 4 backbone=dict( 5 _delete_=True, 6 type='HRNet', 7 extra=dict( 8 stage1=dict( 9 num_modules=1, 10 num_branches=1, 11 block='BOTTLENECK', 12 num_blocks=(4, ), 13 num_channels=(64, )), 14 stage2=dict( 15 num_modules=1, 16 num_branches=2, 17 block='BASIC', 18 num_blocks=(4, 4), 19 num_channels=(32, 64)), 20 stage3=dict( 21 num_modules=4, 22 num_branches=3, 23 block='BASIC', 24 num_blocks=(4, 4, 4), 25 num_channels=(32, 64, 128)), 26 stage4=dict( 27 num_modules=3, 28 num_branches=4, 29 block='BASIC', 30 num_blocks=(4, 4, 4, 4), 31 num_channels=(32, 64, 128, 256)))), 32 neck=dict( 33 _delete_=True, 34 type='HRFPN', 35 in_channels=[32, 64, 128, 256], 36 out_channels=256, 37 stride=2, 38 num_outs=5)) 39 [end of configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py --- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py +++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py @@ -36,3 +36,34 @@ out_channels=256, stride=2, num_outs=5)) +img_norm_cfg = dict( + mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py --- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py +++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -1,6 +1,6 @@ _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) + mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True),
{"golden_diff": "diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n@@ -36,3 +36,34 @@\n out_channels=256,\n stride=2,\n num_outs=5))\n+img_norm_cfg = dict(\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n+train_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(type='LoadAnnotations', with_bbox=True),\n+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n+ dict(type='RandomFlip', flip_ratio=0.5),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='DefaultFormatBundle'),\n+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n+]\n+test_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(\n+ type='MultiScaleFlipAug',\n+ img_scale=(1333, 800),\n+ flip=False,\n+ transforms=[\n+ dict(type='Resize', keep_ratio=True),\n+ dict(type='RandomFlip'),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='ImageToTensor', keys=['img']),\n+ dict(type='Collect', keys=['img']),\n+ ])\n+]\n+data = dict(\n+ train=dict(pipeline=train_pipeline),\n+ val=dict(pipeline=test_pipeline),\n+ test=dict(pipeline=test_pipeline))\ndiff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n@@ -1,6 +1,6 @@\n _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\n img_norm_cfg = dict(\n- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n train_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n", "issue": "Different mean values of image normalization for HRNet\nI find the different configs of HRNet experiments use different mean values for image normalization.\r\n\r\nFor example,\r\n[fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value,\r\nwhile [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value.\r\n\r\nWhich one is correct?\n", "before_files": [{"content": "_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 640), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n 
dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n# learning policy\nlr_config = dict(step=[16, 22])\ntotal_epochs = 24\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py"}, {"content": "_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'\nmodel = dict(\n pretrained='open-mmlab://msra/hrnetv2_w32',\n backbone=dict(\n _delete_=True,\n type='HRNet',\n extra=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4, ),\n num_channels=(64, )),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(32, 64)),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(32, 64, 128)),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(32, 64, 128, 256)))),\n neck=dict(\n _delete_=True,\n type='HRFPN',\n in_channels=[32, 64, 128, 256],\n out_channels=256,\n stride=2,\n num_outs=5))\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py"}]}
1768
830
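The mmdetection diff settles the question by writing a single `img_norm_cfg` into the 1x base config (which the multi-scale 2x config then inherits) and moves both off `std=[1.0, 1.0, 1.0]` to matching per-channel stds. A toy illustration of what the pipeline's Normalize transform then computes, with a made-up BGR pixel:

```python
mean = [103.53, 116.28, 123.675]   # B, G, R means from the golden diff
std = [57.375, 57.12, 58.395]      # matching per-channel stds
pixel = [110.0, 120.0, 130.0]      # hypothetical BGR pixel values

normalized = [(value - m) / s for value, m, s in zip(pixel, mean, std)]
print(normalized)  # roughly [0.113, 0.065, 0.108]
```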
gh_patches_debug_45268
rasdani/github-patches
git_diff
dj-stripe__dj-stripe-1001
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Webhook missing: invoice.payment_action_required I can't find any reference to ```invoice.payment_action_required``` anywhere, however this is a fairly critical piece of off-session SCA workflow. Is it simply a case of adding it to the signals list? https://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required </issue> <code> [start of djstripe/signals.py] 1 """ 2 signals are sent for each event Stripe sends to the app 3 4 Stripe docs for Webhooks: https://stripe.com/docs/webhooks 5 """ 6 from django.db.models.signals import pre_delete 7 from django.dispatch import Signal, receiver 8 9 from . import settings as djstripe_settings 10 11 webhook_processing_error = Signal(providing_args=["data", "exception"]) 12 13 # A signal for each Event type. See https://stripe.com/docs/api/events/types 14 15 WEBHOOK_SIGNALS = dict( 16 [ 17 (hook, Signal(providing_args=["event"])) 18 for hook in [ 19 "account.updated", 20 "account.application.authorized", 21 "account.application.deauthorized", 22 "account.external_account.created", 23 "account.external_account.deleted", 24 "account.external_account.updated", 25 "application_fee.created", 26 "application_fee.refunded", 27 "application_fee.refund.updated", 28 "balance.available", 29 "charge.captured", 30 "charge.expired", 31 "charge.failed", 32 "charge.pending", 33 "charge.refunded", 34 "charge.succeeded", 35 "charge.updated", 36 "charge.dispute.closed", 37 "charge.dispute.created", 38 "charge.dispute.funds_reinstated", 39 "charge.dispute.funds_withdrawn", 40 "charge.dispute.updated", 41 "charge.refund.updated", 42 "checkout.session.completed", 43 "coupon.created", 44 "coupon.deleted", 45 "coupon.updated", 46 "customer.created", 47 "customer.deleted", 48 "customer.updated", 49 "customer.discount.created", 50 "customer.discount.deleted", 51 "customer.discount.updated", 52 "customer.source.created", 53 "customer.source.deleted", 54 "customer.source.expiring", 55 "customer.source.updated", 56 "customer.subscription.created", 57 "customer.subscription.deleted", 58 "customer.subscription.trial_will_end", 59 "customer.subscription.updated", 60 "file.created", 61 "invoice.created", 62 "invoice.deleted", 63 "invoice.finalized", 64 "invoice.marked_uncollectible", 65 "invoice.payment_failed", 66 "invoice.payment_succeeded", 67 "invoice.sent", 68 "invoice.upcoming", 69 "invoice.updated", 70 "invoice.voided", 71 "invoiceitem.created", 72 "invoiceitem.deleted", 73 "invoiceitem.updated", 74 "issuing_authorization.created", 75 "issuing_authorization.request", 76 "issuing_authorization.updated", 77 "issuing_card.created", 78 "issuing_card.updated", 79 "issuing_cardholder.created", 80 "issuing_cardholder.updated", 81 "issuing_dispute.created", 82 "issuing_dispute.updated", 83 "issuing_settlement.created", 84 "issuing_settlement.updated", 85 "issuing_transaction.created", 86 "issuing_transaction.updated", 87 "order.created", 88 "order.payment_failed", 89 "order.payment_succeeded", 90 "order.updated", 91 "order_return.created", 92 "payment_intent.amount_capturable_updated", 93 "payment_intent.created", 94 "payment_intent.payment_failed", 95 "payment_intent.succeeded", 96 "payment_method.attached", 97 "payment_method.card_automatically_updated", 98 "payment_method.detached", 99 "payment_method.updated", 100 "payout.canceled", 101 "payout.created", 102 "payout.failed", 103 "payout.paid", 104 "payout.updated", 105 "plan.created", 106 "plan.deleted", 
107 "plan.updated", 108 "product.created", 109 "product.deleted", 110 "product.updated", 111 "recipient.created", 112 "recipient.deleted", 113 "recipient.updated", 114 "reporting.report_run.failed", 115 "reporting.report_run.succeeded", 116 "reporting.report_type.updated", 117 "review.closed", 118 "review.opened", 119 "setup_intent.created", 120 "setup_intent.setup_failed", 121 "setup_intent.succeeded", 122 "sigma.scheduled_query_run.created", 123 "sku.created", 124 "sku.deleted", 125 "sku.updated", 126 "source.canceled", 127 "source.chargeable", 128 "source.failed", 129 "source.mandate_notification", 130 "source.refund_attributes_required", 131 "source.transaction.created", 132 "source.transaction.updated", 133 "topup.canceled", 134 "topup.created", 135 "topup.failed", 136 "topup.reversed", 137 "topup.succeeded", 138 "transfer.created", 139 "transfer.reversed", 140 "transfer.updated", 141 # deprecated (no longer in events_types list) - TODO can be deleted? 142 "checkout_beta.session_succeeded", 143 "issuer_fraud_record.created", 144 "payment_intent.requires_capture", 145 "subscription_schedule.canceled", 146 "subscription_schedule.completed", 147 "subscription_schedule.created", 148 "subscription_schedule.released", 149 "subscription_schedule.updated", 150 # special case? - TODO can be deleted? 151 "ping", 152 ] 153 ] 154 ) 155 156 157 @receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string()) 158 def on_delete_subscriber_purge_customer(instance=None, **kwargs): 159 """ Purge associated customers when the subscriber is deleted. """ 160 for customer in instance.djstripe_customers.all(): 161 customer.purge() 162 [end of djstripe/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/djstripe/signals.py b/djstripe/signals.py --- a/djstripe/signals.py +++ b/djstripe/signals.py @@ -16,36 +16,41 @@ [ (hook, Signal(providing_args=["event"])) for hook in [ - "account.updated", + # Update this by copy-pasting the "enabled_events" enum values from + # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json "account.application.authorized", "account.application.deauthorized", "account.external_account.created", "account.external_account.deleted", "account.external_account.updated", + "account.updated", "application_fee.created", - "application_fee.refunded", "application_fee.refund.updated", + "application_fee.refunded", "balance.available", + "capability.updated", "charge.captured", - "charge.expired", - "charge.failed", - "charge.pending", - "charge.refunded", - "charge.succeeded", - "charge.updated", "charge.dispute.closed", "charge.dispute.created", "charge.dispute.funds_reinstated", "charge.dispute.funds_withdrawn", "charge.dispute.updated", + "charge.expired", + "charge.failed", + "charge.pending", "charge.refund.updated", + "charge.refunded", + "charge.succeeded", + "charge.updated", "checkout.session.completed", "coupon.created", "coupon.deleted", "coupon.updated", + "credit_note.created", + "credit_note.updated", + "credit_note.voided", "customer.created", "customer.deleted", - "customer.updated", "customer.discount.created", "customer.discount.deleted", "customer.discount.updated", @@ -57,11 +62,16 @@ "customer.subscription.deleted", "customer.subscription.trial_will_end", "customer.subscription.updated", + "customer.tax_id.created", + "customer.tax_id.deleted", + "customer.tax_id.updated", + "customer.updated", "file.created", "invoice.created", "invoice.deleted", "invoice.finalized", "invoice.marked_uncollectible", + "invoice.payment_action_required", "invoice.payment_failed", "invoice.payment_succeeded", "invoice.sent", @@ -102,12 +112,17 @@ "payout.failed", "payout.paid", "payout.updated", + "person.created", + "person.deleted", + "person.updated", "plan.created", "plan.deleted", "plan.updated", "product.created", "product.deleted", "product.updated", + "radar.early_fraud_warning.created", + "radar.early_fraud_warning.updated", "recipient.created", "recipient.deleted", "recipient.updated", @@ -130,12 +145,23 @@ "source.refund_attributes_required", "source.transaction.created", "source.transaction.updated", + "subscription_schedule.aborted", + "subscription_schedule.canceled", + "subscription_schedule.completed", + "subscription_schedule.created", + "subscription_schedule.expiring", + "subscription_schedule.released", + "subscription_schedule.updated", + "tax_rate.created", + "tax_rate.updated", "topup.canceled", "topup.created", "topup.failed", "topup.reversed", "topup.succeeded", "transfer.created", + "transfer.failed", + "transfer.paid", "transfer.reversed", "transfer.updated", # deprecated (no longer in events_types list) - TODO can be deleted?
{"golden_diff": "diff --git a/djstripe/signals.py b/djstripe/signals.py\n--- a/djstripe/signals.py\n+++ b/djstripe/signals.py\n@@ -16,36 +16,41 @@\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n- \"account.updated\",\n+ # Update this by copy-pasting the \"enabled_events\" enum values from\n+ # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n+ \"account.updated\",\n \"application_fee.created\",\n- \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n+ \"application_fee.refunded\",\n \"balance.available\",\n+ \"capability.updated\",\n \"charge.captured\",\n- \"charge.expired\",\n- \"charge.failed\",\n- \"charge.pending\",\n- \"charge.refunded\",\n- \"charge.succeeded\",\n- \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n+ \"charge.expired\",\n+ \"charge.failed\",\n+ \"charge.pending\",\n \"charge.refund.updated\",\n+ \"charge.refunded\",\n+ \"charge.succeeded\",\n+ \"charge.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n+ \"credit_note.created\",\n+ \"credit_note.updated\",\n+ \"credit_note.voided\",\n \"customer.created\",\n \"customer.deleted\",\n- \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n@@ -57,11 +62,16 @@\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n+ \"customer.tax_id.created\",\n+ \"customer.tax_id.deleted\",\n+ \"customer.tax_id.updated\",\n+ \"customer.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n+ \"invoice.payment_action_required\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n@@ -102,12 +112,17 @@\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n+ \"person.created\",\n+ \"person.deleted\",\n+ \"person.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n+ \"radar.early_fraud_warning.created\",\n+ \"radar.early_fraud_warning.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n@@ -130,12 +145,23 @@\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n+ \"subscription_schedule.aborted\",\n+ \"subscription_schedule.canceled\",\n+ \"subscription_schedule.completed\",\n+ \"subscription_schedule.created\",\n+ \"subscription_schedule.expiring\",\n+ \"subscription_schedule.released\",\n+ \"subscription_schedule.updated\",\n+ \"tax_rate.created\",\n+ \"tax_rate.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n+ \"transfer.failed\",\n+ \"transfer.paid\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n", "issue": "Webhook missing: invoice.payment_action_required\nI can't find any reference to ```invoice.payment_action_required``` anywhere, however this is a fairly critical piece of off-session SCA 
workflow. Is it simply a case of adding it to the signals list?\r\n\r\nhttps://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required\n", "before_files": [{"content": "\"\"\"\nsignals are sent for each event Stripe sends to the app\n\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\"\"\"\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal, receiver\n\nfrom . import settings as djstripe_settings\n\nwebhook_processing_error = Signal(providing_args=[\"data\", \"exception\"])\n\n# A signal for each Event type. See https://stripe.com/docs/api/events/types\n\nWEBHOOK_SIGNALS = dict(\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n \"account.updated\",\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n \"application_fee.created\",\n \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n \"balance.available\",\n \"charge.captured\",\n \"charge.expired\",\n \"charge.failed\",\n \"charge.pending\",\n \"charge.refunded\",\n \"charge.succeeded\",\n \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n \"charge.refund.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n \"customer.created\",\n \"customer.deleted\",\n \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n \"customer.source.created\",\n \"customer.source.deleted\",\n \"customer.source.expiring\",\n \"customer.source.updated\",\n \"customer.subscription.created\",\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n \"invoice.upcoming\",\n \"invoice.updated\",\n \"invoice.voided\",\n \"invoiceitem.created\",\n \"invoiceitem.deleted\",\n \"invoiceitem.updated\",\n \"issuing_authorization.created\",\n \"issuing_authorization.request\",\n \"issuing_authorization.updated\",\n \"issuing_card.created\",\n \"issuing_card.updated\",\n \"issuing_cardholder.created\",\n \"issuing_cardholder.updated\",\n \"issuing_dispute.created\",\n \"issuing_dispute.updated\",\n \"issuing_settlement.created\",\n \"issuing_settlement.updated\",\n \"issuing_transaction.created\",\n \"issuing_transaction.updated\",\n \"order.created\",\n \"order.payment_failed\",\n \"order.payment_succeeded\",\n \"order.updated\",\n \"order_return.created\",\n \"payment_intent.amount_capturable_updated\",\n \"payment_intent.created\",\n \"payment_intent.payment_failed\",\n \"payment_intent.succeeded\",\n \"payment_method.attached\",\n \"payment_method.card_automatically_updated\",\n \"payment_method.detached\",\n \"payment_method.updated\",\n \"payout.canceled\",\n \"payout.created\",\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n \"reporting.report_run.failed\",\n 
\"reporting.report_run.succeeded\",\n \"reporting.report_type.updated\",\n \"review.closed\",\n \"review.opened\",\n \"setup_intent.created\",\n \"setup_intent.setup_failed\",\n \"setup_intent.succeeded\",\n \"sigma.scheduled_query_run.created\",\n \"sku.created\",\n \"sku.deleted\",\n \"sku.updated\",\n \"source.canceled\",\n \"source.chargeable\",\n \"source.failed\",\n \"source.mandate_notification\",\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n \"checkout_beta.session_succeeded\",\n \"issuer_fraud_record.created\",\n \"payment_intent.requires_capture\",\n \"subscription_schedule.canceled\",\n \"subscription_schedule.completed\",\n \"subscription_schedule.created\",\n \"subscription_schedule.released\",\n \"subscription_schedule.updated\",\n # special case? - TODO can be deleted?\n \"ping\",\n ]\n ]\n)\n\n\n@receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())\ndef on_delete_subscriber_purge_customer(instance=None, **kwargs):\n \"\"\" Purge associated customers when the subscriber is deleted. \"\"\"\n for customer in instance.djstripe_customers.all():\n customer.purge()\n", "path": "djstripe/signals.py"}]}
num_tokens_prompt: 2,042
num_tokens_diff: 833
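Editor's note: the accepted diff above only registers the missing event names in `WEBHOOK_SIGNALS`; the record does not show how a project consumes them. As a hedged illustration (not part of the dataset record), a downstream Django app could subscribe to the newly added signal through the `WEBHOOK_SIGNALS` dict defined in `djstripe/signals.py` above — the handler name and the notification logic are hypothetical, and the snippet assumes dj-stripe is installed with Django settings configured:

```python
# Hypothetical subscriber for the newly registered event type.
# WEBHOOK_SIGNALS is the dict shown in djstripe/signals.py in this record;
# receivers are called with the djstripe Event instance as the `event` kwarg
# (per providing_args=["event"]).
from djstripe.signals import WEBHOOK_SIGNALS

def on_payment_action_required(sender, event, **kwargs):
    # Typical SCA flow: notify the customer so they can complete 3D Secure.
    # `event.data["object"]` carries the Stripe invoice payload.
    invoice_id = event.data["object"]["id"]
    print(f"Invoice {invoice_id} requires customer action")

# Connecting in AppConfig.ready() keeps the receiver registered exactly once.
WEBHOOK_SIGNALS["invoice.payment_action_required"].connect(on_payment_action_required)
```

dj-stripe also exposes a `@webhooks.handler(...)` decorator over the same event-type mapping, which is the more common way to register handlers.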
problem_id: gh_patches_debug_4877
source: rasdani/github-patches
task_type: git_diff
in_source_id: bookwyrm-social__bookwyrm-2042
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> updating progress doesn't work Warning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social. When i go to the book view, i have a block with two progress, but without any print of the pages i've read. ![bookwyrm1](https://user-images.githubusercontent.com/40766799/156523379-53341dc3-cb0a-4213-989b-e62ec0cd4b38.png) If i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled. ![bookwyrm2](https://user-images.githubusercontent.com/40766799/156523381-37a613ff-bcad-4bd3-81c2-b9667b77c4c9.png) Then i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it. ![bookwyrm3](https://user-images.githubusercontent.com/40766799/156523383-ed2789cb-d6b5-4d04-ae59-c5f65ff82625.png) One problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more "graphic' layout could be very cool ;) </issue> <code> [start of bookwyrm/forms/forms.py] 1 """ using django model forms """ 2 from django import forms 3 from django.forms import widgets 4 from django.utils.translation import gettext_lazy as _ 5 6 from bookwyrm import models 7 from bookwyrm.models.user import FeedFilterChoices 8 from .custom_form import CustomForm 9 10 11 # pylint: disable=missing-class-docstring 12 class FeedStatusTypesForm(CustomForm): 13 class Meta: 14 model = models.User 15 fields = ["feed_status_types"] 16 help_texts = {f: None for f in fields} 17 widgets = { 18 "feed_status_types": widgets.CheckboxSelectMultiple( 19 choices=FeedFilterChoices, 20 ), 21 } 22 23 24 class ImportForm(forms.Form): 25 csv_file = forms.FileField() 26 27 28 class ShelfForm(CustomForm): 29 class Meta: 30 model = models.Shelf 31 fields = ["user", "name", "privacy", "description"] 32 33 34 class GoalForm(CustomForm): 35 class Meta: 36 model = models.AnnualGoal 37 fields = ["user", "year", "goal", "privacy"] 38 39 40 class ReportForm(CustomForm): 41 class Meta: 42 model = models.Report 43 fields = ["user", "reporter", "status", "links", "note"] 44 45 46 class ReadThroughForm(CustomForm): 47 def clean(self): 48 """make sure the email isn't in use by a registered user""" 49 cleaned_data = super().clean() 50 start_date = cleaned_data.get("start_date") 51 finish_date = cleaned_data.get("finish_date") 52 if start_date and finish_date and start_date > finish_date: 53 self.add_error( 54 "finish_date", _("Reading finish date cannot be before start date.") 55 ) 56 57 class Meta: 58 model = models.ReadThrough 59 fields = ["user", "book", "start_date", "finish_date"] 60 [end of bookwyrm/forms/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py --- a/bookwyrm/forms/forms.py +++ b/bookwyrm/forms/forms.py @@ -45,7 +45,7 @@ class ReadThroughForm(CustomForm): def clean(self): - """make sure the email isn't in use by a registered user""" + """don't let readthroughs end before they start""" cleaned_data = super().clean() start_date = cleaned_data.get("start_date") finish_date = cleaned_data.get("finish_date")
{"golden_diff": "diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py\n--- a/bookwyrm/forms/forms.py\n+++ b/bookwyrm/forms/forms.py\n@@ -45,7 +45,7 @@\n \n class ReadThroughForm(CustomForm):\n def clean(self):\n- \"\"\"make sure the email isn't in use by a registered user\"\"\"\n+ \"\"\"don't let readthroughs end before they start\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n", "issue": "updating progress doesn't work\nWarning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social. \r\n\r\nWhen i go to the book view, i have a block with two progress, but without any print of the pages i've read. \r\n![bookwyrm1](https://user-images.githubusercontent.com/40766799/156523379-53341dc3-cb0a-4213-989b-e62ec0cd4b38.png)\r\n\r\nIf i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled.\r\n![bookwyrm2](https://user-images.githubusercontent.com/40766799/156523381-37a613ff-bcad-4bd3-81c2-b9667b77c4c9.png)\r\n\r\nThen i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it.\r\n![bookwyrm3](https://user-images.githubusercontent.com/40766799/156523383-ed2789cb-d6b5-4d04-ae59-c5f65ff82625.png)\r\n\r\nOne problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more \"graphic' layout could be very cool ;)\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\nfrom django.forms import widgets\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom bookwyrm.models.user import FeedFilterChoices\nfrom .custom_form import CustomForm\n\n\n# pylint: disable=missing-class-docstring\nclass FeedStatusTypesForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"feed_status_types\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"feed_status_types\": widgets.CheckboxSelectMultiple(\n choices=FeedFilterChoices,\n ),\n }\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\", \"description\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"status\", \"links\", \"note\"]\n\n\nclass ReadThroughForm(CustomForm):\n def clean(self):\n \"\"\"make sure the email isn't in use by a registered user\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n if start_date and finish_date and start_date > finish_date:\n self.add_error(\n \"finish_date\", _(\"Reading finish date cannot be before start date.\")\n )\n\n class Meta:\n model = models.ReadThrough\n fields = [\"user\", \"book\", \"start_date\", \"finish_date\"]\n", "path": "bookwyrm/forms/forms.py"}]}
num_tokens_prompt: 1,407
num_tokens_diff: 123
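Editor's note: the golden diff for this record is docstring-only — `ReadThroughForm.clean()` already contained the date-ordering validation, and its docstring ("make sure the email isn't in use...") was simply copy-pasted from an unrelated form; the progress-update behavior described in the issue is out of scope for the accepted patch. As a hedged illustration (not part of the dataset record), the validation rule that `clean()` enforces can be demonstrated with a standalone sketch — a simplified stand-in, since the real class is a `ModelForm` bound to `bookwyrm.models.ReadThrough`:

```python
# Standalone sketch of the date check in ReadThroughForm.clean().
# The form class and field set here are simplified stand-ins.
import django
from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)  # minimal settings so forms can run
    django.setup()

from django import forms

class ReadThroughDatesForm(forms.Form):
    start_date = forms.DateField(required=False)
    finish_date = forms.DateField(required=False)

    def clean(self):
        cleaned = super().clean()
        start = cleaned.get("start_date")
        finish = cleaned.get("finish_date")
        if start and finish and start > finish:
            self.add_error(
                "finish_date", "Reading finish date cannot be before start date."
            )
        return cleaned

# A finish date earlier than the start date must be rejected.
form = ReadThroughDatesForm(
    data={"start_date": "2022-03-02", "finish_date": "2022-03-01"}
)
assert not form.is_valid()
assert "finish_date" in form.errors
```

Attaching the error to `finish_date` (rather than raising a form-wide `ValidationError`) surfaces the message next to the offending field, which matches how the original form reports it.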