Dataset schema (column, type, value range):
- problem_id: string, lengths 18 to 22
- source: string, 1 distinct value ("rasdani/github-patches" in the rows below)
- task_type: string, 1 distinct value ("git_diff" in the rows below)
- in_source_id: string, lengths 13 to 58
- prompt: string, lengths 1.71k to 9.01k
- golden_diff: string, lengths 151 to 4.94k
- verification_info: string, lengths 465 to 11.3k
- num_tokens_prompt: int64, values 557 to 2.05k
- num_tokens_diff: int64, values 48 to 1.02k
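The rows that follow are full records in this schema. As a quick orientation, here is a minimal sketch of loading the dataset and inspecting one row with the Hugging Face `datasets` library; the repository id comes from the `source` value above, while the split name and the exact storage type of `verification_info` are assumptions.

```python
# Minimal sketch (assumptions noted above): load the dataset and look at one row.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # split name is an assumption

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["task_type"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])

info = row["verification_info"]
if isinstance(info, str):
    # The viewer reports this column as a string; parse it if it arrives as JSON text.
    info = json.loads(info)
print(sorted(info.keys()))  # expected: before_files, golden_diff, issue
```

The printed keys should match the structure visible in the rows below, where each `verification_info` value contains the golden diff, the issue text, and the pre-patch file contents.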
gh_patches_debug_16340
rasdani/github-patches
git_diff
qutip__qutip-2335
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typos in qutip.expect()'s docstring ### Bug Description Two words in the docstring contain typos. See below. ### Code to Reproduce the Bug ```shell from qutip import expect print(expect.__doc__) ``` ### Code Output ```shell Parameters ---------- oper : qobj/array-like A single or a `list` or operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. Returns ------- expt : float/complex/array-like Expectation value. ``real`` if `oper` is Hermitian, ``complex`` otherwise. A (nested) array of expectaction values of state or operator are arrays. ``` ### Expected Behaviour Parameters ---------- oper : qobj/array-like A single or a `list` **~~or~~** **_of_** operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. Returns ------- expt : float/complex/array-like Expectation value. ``real`` if `oper` is Hermitian, ``complex`` otherwise. A (nested) array of expectaction values **~~of~~** **_if_** state or operator are arrays. ### Your Environment ```shell QuTiP Version: 4.7.5 Numpy Version: 1.26.4 Scipy Version: 1.11.4 Cython Version: None Matplotlib Version: 3.8.3 Python Version: 3.12.2 Number of CPUs: 8 BLAS Info: Generic OPENMP Installed: False INTEL MKL Ext: False Platform Info: Windows (AMD64) ``` ### Additional Context _No response_ </issue> <code> [start of qutip/core/expect.py] 1 __all__ = ['expect', 'variance'] 2 3 import numpy as np 4 5 from .qobj import Qobj 6 from . import data as _data 7 8 9 def expect(oper, state): 10 """ 11 Calculate the expectation value for operator(s) and state(s). The 12 expectation of state ``k`` on operator ``A`` is defined as 13 ``k.dag() @ A @ k``, and for density matrix ``R`` on operator ``A`` it is 14 ``trace(A @ R)``. 15 16 Parameters 17 ---------- 18 oper : qobj/array-like 19 A single or a `list` or operators for expectation value. 20 21 state : qobj/array-like 22 A single or a `list` of quantum states or density matrices. 23 24 Returns 25 ------- 26 expt : float/complex/array-like 27 Expectation value. ``real`` if ``oper`` is Hermitian, ``complex`` 28 otherwise. A (nested) array of expectaction values of state or operator 29 are arrays. 30 31 Examples 32 -------- 33 >>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE 34 True 35 36 """ 37 if isinstance(state, Qobj) and isinstance(oper, Qobj): 38 return _single_qobj_expect(oper, state) 39 40 elif isinstance(oper, (list, np.ndarray)): 41 if isinstance(state, Qobj): 42 dtype = np.complex128 43 if all(op.isherm for op in oper) and (state.isket or state.isherm): 44 dtype = np.float64 45 return np.array([_single_qobj_expect(op, state) for op in oper], 46 dtype=dtype) 47 return [expect(op, state) for op in oper] 48 49 elif isinstance(state, (list, np.ndarray)): 50 dtype = np.complex128 51 if oper.isherm and all(op.isherm or op.isket for op in state): 52 dtype = np.float64 53 return np.array([_single_qobj_expect(oper, x) for x in state], 54 dtype=dtype) 55 raise TypeError('Arguments must be quantum objects') 56 57 58 def _single_qobj_expect(oper, state): 59 """ 60 Private function used by expect to calculate expectation values of Qobjs. 
61 """ 62 if not oper.isoper or not (state.isket or state.isoper): 63 raise TypeError('invalid operand types') 64 if oper.dims[1] != state.dims[0]: 65 msg = ( 66 "incompatible dimensions " 67 + str(oper.dims[1]) + " and " + str(state.dims[0]) 68 ) 69 raise ValueError(msg) 70 out = _data.expect(oper.data, state.data) 71 72 # This ensures that expect can return something that is not a number such 73 # as a `tensorflow.Tensor` in qutip-tensorflow. 74 return out.real if (oper.isherm 75 and (state.isket or state.isherm) 76 and hasattr(out, "real") 77 ) else out 78 79 80 def variance(oper, state): 81 """ 82 Variance of an operator for the given state vector or density matrix. 83 84 Parameters 85 ---------- 86 oper : qobj 87 Operator for expectation value. 88 89 state : qobj/list 90 A single or ``list`` of quantum states or density matrices.. 91 92 Returns 93 ------- 94 var : float 95 Variance of operator 'oper' for given state. 96 97 """ 98 return expect(oper**2, state) - expect(oper, state)**2 99 [end of qutip/core/expect.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qutip/core/expect.py b/qutip/core/expect.py --- a/qutip/core/expect.py +++ b/qutip/core/expect.py @@ -16,7 +16,7 @@ Parameters ---------- oper : qobj/array-like - A single or a `list` or operators for expectation value. + A single or a `list` of operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. @@ -25,8 +25,8 @@ ------- expt : float/complex/array-like Expectation value. ``real`` if ``oper`` is Hermitian, ``complex`` - otherwise. A (nested) array of expectaction values of state or operator - are arrays. + otherwise. A (nested) array of expectaction values if ``state`` or + ``oper`` are arrays. Examples --------
{"golden_diff": "diff --git a/qutip/core/expect.py b/qutip/core/expect.py\n--- a/qutip/core/expect.py\n+++ b/qutip/core/expect.py\n@@ -16,7 +16,7 @@\n Parameters\n ----------\n oper : qobj/array-like\n- A single or a `list` or operators for expectation value.\n+ A single or a `list` of operators for expectation value.\n \n state : qobj/array-like\n A single or a `list` of quantum states or density matrices.\n@@ -25,8 +25,8 @@\n -------\n expt : float/complex/array-like\n Expectation value. ``real`` if ``oper`` is Hermitian, ``complex``\n- otherwise. A (nested) array of expectaction values of state or operator\n- are arrays.\n+ otherwise. A (nested) array of expectaction values if ``state`` or\n+ ``oper`` are arrays.\n \n Examples\n --------\n", "issue": "Typos in qutip.expect()'s docstring\n### Bug Description\r\n\r\nTwo words in the docstring contain typos. See below.\r\n\r\n### Code to Reproduce the Bug\r\n\r\n```shell\r\nfrom qutip import expect\r\nprint(expect.__doc__)\r\n```\r\n\r\n\r\n### Code Output\r\n\r\n```shell\r\nParameters\r\n----------\r\noper : qobj/array-like\r\n A single or a `list` or operators for expectation value.\r\n\r\nstate : qobj/array-like\r\n A single or a `list` of quantum states or density matrices.\r\n\r\nReturns\r\n-------\r\nexpt : float/complex/array-like\r\n Expectation value. ``real`` if `oper` is Hermitian, ``complex``\r\n otherwise. A (nested) array of expectaction values of state or operator\r\n are arrays.\r\n```\r\n\r\n\r\n### Expected Behaviour\r\n\r\nParameters\r\n----------\r\noper : qobj/array-like\r\n A single or a `list` **~~or~~** **_of_** operators for expectation value.\r\n\r\nstate : qobj/array-like\r\n A single or a `list` of quantum states or density matrices.\r\n\r\nReturns\r\n-------\r\nexpt : float/complex/array-like\r\n Expectation value. ``real`` if `oper` is Hermitian, ``complex``\r\n otherwise. A (nested) array of expectaction values **~~of~~** **_if_** state or operator\r\n are arrays.\r\n\r\n### Your Environment\r\n\r\n```shell\r\nQuTiP Version: 4.7.5\r\nNumpy Version: 1.26.4\r\nScipy Version: 1.11.4\r\nCython Version: None\r\nMatplotlib Version: 3.8.3\r\nPython Version: 3.12.2\r\nNumber of CPUs: 8\r\nBLAS Info: Generic\r\nOPENMP Installed: False\r\nINTEL MKL Ext: False\r\nPlatform Info: Windows (AMD64)\r\n```\r\n\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "__all__ = ['expect', 'variance']\n\nimport numpy as np\n\nfrom .qobj import Qobj\nfrom . import data as _data\n\n\ndef expect(oper, state):\n \"\"\"\n Calculate the expectation value for operator(s) and state(s). The\n expectation of state ``k`` on operator ``A`` is defined as\n ``k.dag() @ A @ k``, and for density matrix ``R`` on operator ``A`` it is\n ``trace(A @ R)``.\n\n Parameters\n ----------\n oper : qobj/array-like\n A single or a `list` or operators for expectation value.\n\n state : qobj/array-like\n A single or a `list` of quantum states or density matrices.\n\n Returns\n -------\n expt : float/complex/array-like\n Expectation value. ``real`` if ``oper`` is Hermitian, ``complex``\n otherwise. 
A (nested) array of expectaction values of state or operator\n are arrays.\n\n Examples\n --------\n >>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE\n True\n\n \"\"\"\n if isinstance(state, Qobj) and isinstance(oper, Qobj):\n return _single_qobj_expect(oper, state)\n\n elif isinstance(oper, (list, np.ndarray)):\n if isinstance(state, Qobj):\n dtype = np.complex128\n if all(op.isherm for op in oper) and (state.isket or state.isherm):\n dtype = np.float64\n return np.array([_single_qobj_expect(op, state) for op in oper],\n dtype=dtype)\n return [expect(op, state) for op in oper]\n\n elif isinstance(state, (list, np.ndarray)):\n dtype = np.complex128\n if oper.isherm and all(op.isherm or op.isket for op in state):\n dtype = np.float64\n return np.array([_single_qobj_expect(oper, x) for x in state],\n dtype=dtype)\n raise TypeError('Arguments must be quantum objects')\n\n\ndef _single_qobj_expect(oper, state):\n \"\"\"\n Private function used by expect to calculate expectation values of Qobjs.\n \"\"\"\n if not oper.isoper or not (state.isket or state.isoper):\n raise TypeError('invalid operand types')\n if oper.dims[1] != state.dims[0]:\n msg = (\n \"incompatible dimensions \"\n + str(oper.dims[1]) + \" and \" + str(state.dims[0])\n )\n raise ValueError(msg)\n out = _data.expect(oper.data, state.data)\n\n # This ensures that expect can return something that is not a number such\n # as a `tensorflow.Tensor` in qutip-tensorflow.\n return out.real if (oper.isherm\n and (state.isket or state.isherm)\n and hasattr(out, \"real\")\n ) else out\n\n\ndef variance(oper, state):\n \"\"\"\n Variance of an operator for the given state vector or density matrix.\n\n Parameters\n ----------\n oper : qobj\n Operator for expectation value.\n\n state : qobj/list\n A single or ``list`` of quantum states or density matrices..\n\n Returns\n -------\n var : float\n Variance of operator 'oper' for given state.\n\n \"\"\"\n return expect(oper**2, state) - expect(oper, state)**2\n", "path": "qutip/core/expect.py"}]}
1,923
221
gh_patches_debug_505
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3693
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Increment threadsafety on BigQuery DB-API interface when httplib2 is removed From https://github.com/GoogleCloudPlatform/google-cloud-python/pull/2921/files/5b29a9e42a40f7c74ced0b7a7e9db0add52a20a1#r123596828 Blocked by https://github.com/GoogleCloudPlatform/google-cloud-python/issues/1998 </issue> <code> [start of bigquery/google/cloud/bigquery/dbapi/__init__.py] 1 # Copyright 2017 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Google BigQuery implementation of the Database API Specification v2.0. 16 17 This module implements the `Python Database API Specification v2.0 (DB-API)`_ 18 for Google BigQuery. 19 20 .. _Python Database API Specification v2.0 (DB-API): 21 https://www.python.org/dev/peps/pep-0249/ 22 23 .. warning:: 24 The ``dbapi`` module is **alpha**. The implementation is not complete. It 25 might be changed in backward-incompatible ways and is not subject to any SLA 26 or deprecation policy. 27 """ 28 29 from google.cloud.bigquery.dbapi.connection import connect 30 from google.cloud.bigquery.dbapi.connection import Connection 31 from google.cloud.bigquery.dbapi.cursor import Cursor 32 from google.cloud.bigquery.dbapi.exceptions import Warning 33 from google.cloud.bigquery.dbapi.exceptions import Error 34 from google.cloud.bigquery.dbapi.exceptions import InterfaceError 35 from google.cloud.bigquery.dbapi.exceptions import DatabaseError 36 from google.cloud.bigquery.dbapi.exceptions import DataError 37 from google.cloud.bigquery.dbapi.exceptions import OperationalError 38 from google.cloud.bigquery.dbapi.exceptions import IntegrityError 39 from google.cloud.bigquery.dbapi.exceptions import InternalError 40 from google.cloud.bigquery.dbapi.exceptions import ProgrammingError 41 from google.cloud.bigquery.dbapi.exceptions import NotSupportedError 42 from google.cloud.bigquery.dbapi.types import Binary 43 from google.cloud.bigquery.dbapi.types import Date 44 from google.cloud.bigquery.dbapi.types import DateFromTicks 45 from google.cloud.bigquery.dbapi.types import Time 46 from google.cloud.bigquery.dbapi.types import TimeFromTicks 47 from google.cloud.bigquery.dbapi.types import Timestamp 48 from google.cloud.bigquery.dbapi.types import TimestampFromTicks 49 from google.cloud.bigquery.dbapi.types import BINARY 50 from google.cloud.bigquery.dbapi.types import DATETIME 51 from google.cloud.bigquery.dbapi.types import NUMBER 52 from google.cloud.bigquery.dbapi.types import ROWID 53 from google.cloud.bigquery.dbapi.types import STRING 54 55 56 apilevel = '2.0' 57 58 # Threads may share the module, but not connections. 
59 threadsafety = 1 60 61 paramstyle = 'pyformat' 62 63 __all__ = [ 64 'apilevel', 'threadsafety', 'paramstyle', 'connect', 'Connection', 65 'Cursor', 'Warning', 'Error', 'InterfaceError', 'DatabaseError', 66 'DataError', 'OperationalError', 'IntegrityError', 'InternalError', 67 'ProgrammingError', 'NotSupportedError', 'Binary', 'Date', 'DateFromTicks', 68 'Time', 'TimeFromTicks', 'Timestamp', 'TimestampFromTicks', 'BINARY', 69 'DATETIME', 'NUMBER', 'ROWID', 'STRING', 70 ] 71 [end of bigquery/google/cloud/bigquery/dbapi/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bigquery/google/cloud/bigquery/dbapi/__init__.py b/bigquery/google/cloud/bigquery/dbapi/__init__.py --- a/bigquery/google/cloud/bigquery/dbapi/__init__.py +++ b/bigquery/google/cloud/bigquery/dbapi/__init__.py @@ -55,8 +55,8 @@ apilevel = '2.0' -# Threads may share the module, but not connections. -threadsafety = 1 +# Threads may share the module and connections, but not cursors. +threadsafety = 2 paramstyle = 'pyformat'
{"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/dbapi/__init__.py b/bigquery/google/cloud/bigquery/dbapi/__init__.py\n--- a/bigquery/google/cloud/bigquery/dbapi/__init__.py\n+++ b/bigquery/google/cloud/bigquery/dbapi/__init__.py\n@@ -55,8 +55,8 @@\n \n apilevel = '2.0'\n \n-# Threads may share the module, but not connections.\n-threadsafety = 1\n+# Threads may share the module and connections, but not cursors.\n+threadsafety = 2\n \n paramstyle = 'pyformat'\n", "issue": "Increment threadsafety on BigQuery DB-API interface when httplib2 is removed\nFrom https://github.com/GoogleCloudPlatform/google-cloud-python/pull/2921/files/5b29a9e42a40f7c74ced0b7a7e9db0add52a20a1#r123596828\r\n\r\nBlocked by https://github.com/GoogleCloudPlatform/google-cloud-python/issues/1998\n", "before_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google BigQuery implementation of the Database API Specification v2.0.\n\nThis module implements the `Python Database API Specification v2.0 (DB-API)`_\nfor Google BigQuery.\n\n.. _Python Database API Specification v2.0 (DB-API):\n https://www.python.org/dev/peps/pep-0249/\n\n.. warning::\n The ``dbapi`` module is **alpha**. The implementation is not complete. 
It\n might be changed in backward-incompatible ways and is not subject to any SLA\n or deprecation policy.\n\"\"\"\n\nfrom google.cloud.bigquery.dbapi.connection import connect\nfrom google.cloud.bigquery.dbapi.connection import Connection\nfrom google.cloud.bigquery.dbapi.cursor import Cursor\nfrom google.cloud.bigquery.dbapi.exceptions import Warning\nfrom google.cloud.bigquery.dbapi.exceptions import Error\nfrom google.cloud.bigquery.dbapi.exceptions import InterfaceError\nfrom google.cloud.bigquery.dbapi.exceptions import DatabaseError\nfrom google.cloud.bigquery.dbapi.exceptions import DataError\nfrom google.cloud.bigquery.dbapi.exceptions import OperationalError\nfrom google.cloud.bigquery.dbapi.exceptions import IntegrityError\nfrom google.cloud.bigquery.dbapi.exceptions import InternalError\nfrom google.cloud.bigquery.dbapi.exceptions import ProgrammingError\nfrom google.cloud.bigquery.dbapi.exceptions import NotSupportedError\nfrom google.cloud.bigquery.dbapi.types import Binary\nfrom google.cloud.bigquery.dbapi.types import Date\nfrom google.cloud.bigquery.dbapi.types import DateFromTicks\nfrom google.cloud.bigquery.dbapi.types import Time\nfrom google.cloud.bigquery.dbapi.types import TimeFromTicks\nfrom google.cloud.bigquery.dbapi.types import Timestamp\nfrom google.cloud.bigquery.dbapi.types import TimestampFromTicks\nfrom google.cloud.bigquery.dbapi.types import BINARY\nfrom google.cloud.bigquery.dbapi.types import DATETIME\nfrom google.cloud.bigquery.dbapi.types import NUMBER\nfrom google.cloud.bigquery.dbapi.types import ROWID\nfrom google.cloud.bigquery.dbapi.types import STRING\n\n\napilevel = '2.0'\n\n# Threads may share the module, but not connections.\nthreadsafety = 1\n\nparamstyle = 'pyformat'\n\n__all__ = [\n 'apilevel', 'threadsafety', 'paramstyle', 'connect', 'Connection',\n 'Cursor', 'Warning', 'Error', 'InterfaceError', 'DatabaseError',\n 'DataError', 'OperationalError', 'IntegrityError', 'InternalError',\n 'ProgrammingError', 'NotSupportedError', 'Binary', 'Date', 'DateFromTicks',\n 'Time', 'TimeFromTicks', 'Timestamp', 'TimestampFromTicks', 'BINARY',\n 'DATETIME', 'NUMBER', 'ROWID', 'STRING',\n]\n", "path": "bigquery/google/cloud/bigquery/dbapi/__init__.py"}]}
1,497
131
gh_patches_debug_25995
rasdani/github-patches
git_diff
pytorch__vision-2979
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] Documentation for sigmoid focal loss ## 📚 Documentation Sigmoid focal loss was added to torchvision, but I think docs are not generated for it It was initalized in `init` as well [here](https://github.com/pytorch/vision/blob/v0.8.0/torchvision/ops/__init__.py#L11) I think it's really minor fix, we might need `..autodoc::sigmoid_focal_loss` and it should be fine. </issue> <code> [start of torchvision/ops/focal_loss.py] 1 import torch 2 import torch.nn.functional as F 3 4 5 def sigmoid_focal_loss( 6 inputs, 7 targets, 8 alpha: float = 0.25, 9 gamma: float = 2, 10 reduction: str = "none", 11 ): 12 """ 13 Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . 14 Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. 15 Args: 16 inputs: A float tensor of arbitrary shape. 17 The predictions for each example. 18 targets: A float tensor with the same shape as inputs. Stores the binary 19 classification label for each element in inputs 20 (0 for the negative class and 1 for the positive class). 21 alpha: (optional) Weighting factor in range (0,1) to balance 22 positive vs negative examples or -1 for ignore. Default = 0.25 23 gamma: Exponent of the modulating factor (1 - p_t) to 24 balance easy vs hard examples. 25 reduction: 'none' | 'mean' | 'sum' 26 'none': No reduction will be applied to the output. 27 'mean': The output will be averaged. 28 'sum': The output will be summed. 29 Returns: 30 Loss tensor with the reduction option applied. 31 """ 32 p = torch.sigmoid(inputs) 33 ce_loss = F.binary_cross_entropy_with_logits( 34 inputs, targets, reduction="none" 35 ) 36 p_t = p * targets + (1 - p) * (1 - targets) 37 loss = ce_loss * ((1 - p_t) ** gamma) 38 39 if alpha >= 0: 40 alpha_t = alpha * targets + (1 - alpha) * (1 - targets) 41 loss = alpha_t * loss 42 43 if reduction == "mean": 44 loss = loss.mean() 45 elif reduction == "sum": 46 loss = loss.sum() 47 48 return loss 49 [end of torchvision/ops/focal_loss.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py --- a/torchvision/ops/focal_loss.py +++ b/torchvision/ops/focal_loss.py @@ -3,8 +3,8 @@ def sigmoid_focal_loss( - inputs, - targets, + inputs: torch.Tensor, + targets: torch.Tensor, alpha: float = 0.25, gamma: float = 2, reduction: str = "none", @@ -12,11 +12,12 @@ """ Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: + + Arguments: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs + classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples or -1 for ignore. Default = 0.25
{"golden_diff": "diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py\n--- a/torchvision/ops/focal_loss.py\n+++ b/torchvision/ops/focal_loss.py\n@@ -3,8 +3,8 @@\n \n \n def sigmoid_focal_loss(\n- inputs,\n- targets,\n+ inputs: torch.Tensor,\n+ targets: torch.Tensor,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n@@ -12,11 +12,12 @@\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n- Args:\n+\n+ Arguments:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n- classification label for each element in inputs\n+ classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. Default = 0.25\n", "issue": "[DOC] Documentation for sigmoid focal loss\n## \ud83d\udcda Documentation\r\n\r\nSigmoid focal loss was added to torchvision, but I think docs are not generated for it\r\n\r\nIt was initalized in `init` as well [here](https://github.com/pytorch/vision/blob/v0.8.0/torchvision/ops/__init__.py#L11)\r\n\r\nI think it's really minor fix, we might need `..autodoc::sigmoid_focal_loss` and it should be fine.\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\n\n\ndef sigmoid_focal_loss(\n inputs,\n targets,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n):\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. Default = 0.25\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n p = torch.sigmoid(inputs)\n ce_loss = F.binary_cross_entropy_with_logits(\n inputs, targets, reduction=\"none\"\n )\n p_t = p * targets + (1 - p) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n\n return loss\n", "path": "torchvision/ops/focal_loss.py"}]}
1,158
302
gh_patches_debug_3415
rasdani/github-patches
git_diff
pyro-ppl__numpyro-1041
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New versions of sphinx and jinja2 break docs linting This is observed by @tcbegley in #1034 </issue> <code> [start of setup.py] 1 # Copyright Contributors to the Pyro project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 from __future__ import absolute_import, division, print_function 5 6 import os 7 import sys 8 9 from setuptools import find_packages, setup 10 11 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) 12 13 # Find version 14 for line in open(os.path.join(PROJECT_PATH, "numpyro", "version.py")): 15 if line.startswith("__version__ = "): 16 version = line.strip().split()[2][1:-1] 17 18 # READ README.md for long description on PyPi. 19 try: 20 long_description = open("README.md", encoding="utf-8").read() 21 except Exception as e: 22 sys.stderr.write("Failed to read README.md:\n {}\n".format(e)) 23 sys.stderr.flush() 24 long_description = "" 25 26 27 setup( 28 name="numpyro", 29 version=version, 30 description="Pyro PPL on NumPy", 31 packages=find_packages(include=["numpyro", "numpyro.*"]), 32 url="https://github.com/pyro-ppl/numpyro", 33 author="Uber AI Labs", 34 install_requires=[ 35 "jax>=0.2.11", 36 "jaxlib>=0.1.62", 37 "tqdm", 38 ], 39 extras_require={ 40 "doc": [ 41 "ipython", # sphinx needs this to render codes 42 "jinja2<3.0.0", 43 "nbsphinx", 44 "sphinx<4.0.0", 45 "sphinx_rtd_theme", 46 "sphinx-gallery", 47 ], 48 "test": [ 49 "black", 50 "flake8", 51 "isort>=5.0", 52 "pytest>=4.1", 53 "pyro-api>=0.1.1", 54 "scipy>=1.1", 55 ], 56 "dev": [ 57 "dm-haiku", 58 "flax", 59 # TODO: bump funsor version before the release 60 "funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc", 61 "graphviz", 62 "optax==0.0.6", 63 # TODO: change this to tensorflow_probability>0.12.1 when the next version 64 # of tfp is released. The current release is not compatible with jax>=0.2.12. 65 "tfp-nightly", 66 ], 67 "examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"], 68 }, 69 long_description=long_description, 70 long_description_content_type="text/markdown", 71 keywords="probabilistic machine learning bayesian statistics", 72 license="Apache License 2.0", 73 classifiers=[ 74 "Intended Audience :: Developers", 75 "Intended Audience :: Education", 76 "Intended Audience :: Science/Research", 77 "License :: OSI Approved :: Apache Software License", 78 "Operating System :: POSIX :: Linux", 79 "Operating System :: MacOS :: MacOS X", 80 "Programming Language :: Python :: 3.6", 81 "Programming Language :: Python :: 3.7", 82 "Programming Language :: Python :: 3.8", 83 "Programming Language :: Python :: 3.9", 84 ], 85 ) 86 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -39,9 +39,8 @@ extras_require={ "doc": [ "ipython", # sphinx needs this to render codes - "jinja2<3.0.0", - "nbsphinx", - "sphinx<4.0.0", + "nbsphinx>=0.8.5", + "sphinx", "sphinx_rtd_theme", "sphinx-gallery", ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,9 +39,8 @@\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n- \"jinja2<3.0.0\",\n- \"nbsphinx\",\n- \"sphinx<4.0.0\",\n+ \"nbsphinx>=0.8.5\",\n+ \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n", "issue": "New versions of sphinx and jinja2 break docs linting\nThis is observed by @tcbegley in #1034\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"jinja2<3.0.0\",\n \"nbsphinx\",\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n \"optax==0.0.6\",\n # TODO: change this to tensorflow_probability>0.12.1 when the next version\n # of tfp is released. The current release is not compatible with jax>=0.2.12.\n \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}]}
1,453
124
gh_patches_debug_15692
rasdani/github-patches
git_diff
ethereum__web3.py-1198
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop repeated rpc endpoints * Version: 4.6 ### What was wrong? web3.py has a few repeated endpoints, for example: `web3.version.net` and `web3.net.version` ### How can it be fixed? Deprecate the endpoint api's that don't mirror the json-rpc spec namespacing, in favor of those that do. </issue> <code> [start of web3/version.py] 1 from web3.module import ( 2 Module, 3 ) 4 5 6 class Version(Module): 7 @property 8 def api(self): 9 from web3 import __version__ 10 return __version__ 11 12 @property 13 def node(self): 14 return self.web3.manager.request_blocking("web3_clientVersion", []) 15 16 @property 17 def network(self): 18 return self.web3.manager.request_blocking("net_version", []) 19 20 @property 21 def ethereum(self): 22 return self.web3.manager.request_blocking("eth_protocolVersion", []) 23 [end of web3/version.py] [start of web3/miner.py] 1 from web3.module import ( 2 Module, 3 ) 4 5 6 class Miner(Module): 7 @property 8 def hashrate(self): 9 return self.web3.manager.request_blocking("eth_hashrate", []) 10 11 def makeDAG(self, number): 12 return self.web3.manager.request_blocking("miner_makeDag", [number]) 13 14 def setExtra(self, extra): 15 return self.web3.manager.request_blocking("miner_setExtra", [extra]) 16 17 def setEtherBase(self, etherbase): 18 return self.web3.manager.request_blocking("miner_setEtherbase", [etherbase]) 19 20 def setGasPrice(self, gas_price): 21 return self.web3.manager.request_blocking( 22 "miner_setGasPrice", [gas_price], 23 ) 24 25 def start(self, num_threads): 26 return self.web3.manager.request_blocking( 27 "miner_start", [num_threads], 28 ) 29 30 def stop(self): 31 return self.web3.manager.request_blocking("miner_stop", []) 32 33 def startAutoDAG(self): 34 return self.web3.manager.request_blocking("miner_startAutoDag", []) 35 36 def stopAutoDAG(self): 37 return self.web3.manager.request_blocking("miner_stopAutoDag", []) 38 [end of web3/miner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/web3/miner.py b/web3/miner.py --- a/web3/miner.py +++ b/web3/miner.py @@ -4,10 +4,6 @@ class Miner(Module): - @property - def hashrate(self): - return self.web3.manager.request_blocking("eth_hashrate", []) - def makeDAG(self, number): return self.web3.manager.request_blocking("miner_makeDag", [number]) diff --git a/web3/version.py b/web3/version.py --- a/web3/version.py +++ b/web3/version.py @@ -13,10 +13,6 @@ def node(self): return self.web3.manager.request_blocking("web3_clientVersion", []) - @property - def network(self): - return self.web3.manager.request_blocking("net_version", []) - @property def ethereum(self): return self.web3.manager.request_blocking("eth_protocolVersion", [])
{"golden_diff": "diff --git a/web3/miner.py b/web3/miner.py\n--- a/web3/miner.py\n+++ b/web3/miner.py\n@@ -4,10 +4,6 @@\n \n \n class Miner(Module):\n- @property\n- def hashrate(self):\n- return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n-\n def makeDAG(self, number):\n return self.web3.manager.request_blocking(\"miner_makeDag\", [number])\n \ndiff --git a/web3/version.py b/web3/version.py\n--- a/web3/version.py\n+++ b/web3/version.py\n@@ -13,10 +13,6 @@\n def node(self):\n return self.web3.manager.request_blocking(\"web3_clientVersion\", [])\n \n- @property\n- def network(self):\n- return self.web3.manager.request_blocking(\"net_version\", [])\n-\n @property\n def ethereum(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n", "issue": "Drop repeated rpc endpoints\n* Version: 4.6\r\n\r\n### What was wrong?\r\n\r\nweb3.py has a few repeated endpoints, for example: `web3.version.net` and `web3.net.version`\r\n\r\n\r\n### How can it be fixed?\r\nDeprecate the endpoint api's that don't mirror the json-rpc spec namespacing, in favor of those that do.\n", "before_files": [{"content": "from web3.module import (\n Module,\n)\n\n\nclass Version(Module):\n @property\n def api(self):\n from web3 import __version__\n return __version__\n\n @property\n def node(self):\n return self.web3.manager.request_blocking(\"web3_clientVersion\", [])\n\n @property\n def network(self):\n return self.web3.manager.request_blocking(\"net_version\", [])\n\n @property\n def ethereum(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n", "path": "web3/version.py"}, {"content": "from web3.module import (\n Module,\n)\n\n\nclass Miner(Module):\n @property\n def hashrate(self):\n return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n\n def makeDAG(self, number):\n return self.web3.manager.request_blocking(\"miner_makeDag\", [number])\n\n def setExtra(self, extra):\n return self.web3.manager.request_blocking(\"miner_setExtra\", [extra])\n\n def setEtherBase(self, etherbase):\n return self.web3.manager.request_blocking(\"miner_setEtherbase\", [etherbase])\n\n def setGasPrice(self, gas_price):\n return self.web3.manager.request_blocking(\n \"miner_setGasPrice\", [gas_price],\n )\n\n def start(self, num_threads):\n return self.web3.manager.request_blocking(\n \"miner_start\", [num_threads],\n )\n\n def stop(self):\n return self.web3.manager.request_blocking(\"miner_stop\", [])\n\n def startAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_startAutoDag\", [])\n\n def stopAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_stopAutoDag\", [])\n", "path": "web3/miner.py"}]}
1,097
216
gh_patches_debug_20807
rasdani/github-patches
git_diff
google__flax-2540
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make RNG name configurable in Dropout Currently `'dropout'` is hard-coded as an argument of `make_rng` inside `Dropout`. However, when implementing support for "recurrent dropout" in an LSTMCell or similar you need two kinds of dropout: 1. A regular dropout which is applied to the inputs with a different mask at each step. 2. A "recurrent dropout" that is applied to the state with the same mask at each step. To implement `2` a possibility is to set the RNG name to `'recurrent_dropout'` on the Dropout layer applied to the state and guarantee that each step uses the same random state. From `nn.scan`'s perspective the would look like this: ```python nn.scan(..., split_rngs={'dropout': True, 'recurrent_dropout': False}) ``` The proposal is to add an `rng_name` (or similar) attribute to `Dropout` so we are able support these kind of use-cases. The alternative would be to create a separate `RecurrentDropout` layer with the same code but different hard-coded value. </issue> <code> [start of flax/linen/stochastic.py] 1 # Copyright 2022 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Stochastic modules.""" 16 17 from typing import Optional, Sequence 18 19 from flax.linen.module import compact 20 from flax.linen.module import merge_param 21 from flax.linen.module import Module 22 from jax import lax 23 from jax import random 24 import jax.numpy as jnp 25 26 27 class Dropout(Module): 28 """Create a dropout layer. 29 30 Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure 31 to include an RNG seed named `'dropout'`. For example:: 32 33 model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})` 34 35 Attributes: 36 rate: the dropout probability. (_not_ the keep rate!) 37 broadcast_dims: dimensions that will share the same dropout mask 38 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and 39 masked, whereas if true, no mask is applied and the inputs are returned 40 as is. 41 """ 42 rate: float 43 broadcast_dims: Sequence[int] = () 44 deterministic: Optional[bool] = None 45 46 @compact 47 def __call__(self, inputs, deterministic: Optional[bool] = None): 48 """Applies a random dropout mask to the input. 49 50 Args: 51 inputs: the inputs that should be randomly masked. 52 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and 53 masked, whereas if true, no mask is applied and the inputs are returned 54 as is. 55 56 Returns: 57 The masked inputs reweighted to preserve mean. 58 """ 59 deterministic = merge_param( 60 'deterministic', self.deterministic, deterministic) 61 if self.rate == 0.: 62 return inputs 63 # Prevent gradient NaNs in 1.0 edge-case. 64 if self.rate == 1.0: 65 return jnp.zeros_like(inputs) 66 keep_prob = 1. 
- self.rate 67 if deterministic: 68 return inputs 69 else: 70 rng = self.make_rng('dropout') 71 broadcast_shape = list(inputs.shape) 72 for dim in self.broadcast_dims: 73 broadcast_shape[dim] = 1 74 mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape) 75 mask = jnp.broadcast_to(mask, inputs.shape) 76 return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs)) 77 [end of flax/linen/stochastic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py --- a/flax/linen/stochastic.py +++ b/flax/linen/stochastic.py @@ -38,10 +38,12 @@ deterministic: if false the inputs are scaled by `1 / (1 - rate)` and masked, whereas if true, no mask is applied and the inputs are returned as is. + rng_collection: the rng collection name to use when requesting an rng key. """ rate: float broadcast_dims: Sequence[int] = () deterministic: Optional[bool] = None + rng_collection: str = 'dropout' @compact def __call__(self, inputs, deterministic: Optional[bool] = None): @@ -67,7 +69,7 @@ if deterministic: return inputs else: - rng = self.make_rng('dropout') + rng = self.make_rng(self.rng_collection) broadcast_shape = list(inputs.shape) for dim in self.broadcast_dims: broadcast_shape[dim] = 1
{"golden_diff": "diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py\n--- a/flax/linen/stochastic.py\n+++ b/flax/linen/stochastic.py\n@@ -38,10 +38,12 @@\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n+ rng_collection: the rng collection name to use when requesting an rng key.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n+ rng_collection: str = 'dropout'\n \n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n@@ -67,7 +69,7 @@\n if deterministic:\n return inputs\n else:\n- rng = self.make_rng('dropout')\n+ rng = self.make_rng(self.rng_collection)\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n", "issue": "Make RNG name configurable in Dropout\nCurrently `'dropout'` is hard-coded as an argument of `make_rng` inside `Dropout`. However, when implementing support for \"recurrent dropout\" in an LSTMCell or similar you need two kinds of dropout:\r\n1. A regular dropout which is applied to the inputs with a different mask at each step.\r\n2. A \"recurrent dropout\" that is applied to the state with the same mask at each step.\r\n\r\nTo implement `2` a possibility is to set the RNG name to `'recurrent_dropout'` on the Dropout layer applied to the state and guarantee that each step uses the same random state. From `nn.scan`'s perspective the would look like this:\r\n\r\n```python\r\nnn.scan(..., split_rngs={'dropout': True, 'recurrent_dropout': False})\r\n```\r\nThe proposal is to add an `rng_name` (or similar) attribute to `Dropout` so we are able support these kind of use-cases. The alternative would be to create a separate `RecurrentDropout` layer with the same code but different hard-coded value.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic modules.\"\"\"\n\nfrom typing import Optional, Sequence\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import merge_param\nfrom flax.linen.module import Module\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass Dropout(Module):\n \"\"\"Create a dropout layer.\n\n Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure\n to include an RNG seed named `'dropout'`. For example::\n\n model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`\n\n Attributes:\n rate: the dropout probability. 
(_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n\n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n \"\"\"Applies a random dropout mask to the input.\n\n Args:\n inputs: the inputs that should be randomly masked.\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n\n Returns:\n The masked inputs reweighted to preserve mean.\n \"\"\"\n deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic)\n if self.rate == 0.:\n return inputs\n # Prevent gradient NaNs in 1.0 edge-case.\n if self.rate == 1.0:\n return jnp.zeros_like(inputs)\n keep_prob = 1. - self.rate\n if deterministic:\n return inputs\n else:\n rng = self.make_rng('dropout')\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)\n mask = jnp.broadcast_to(mask, inputs.shape)\n return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))\n", "path": "flax/linen/stochastic.py"}]}
1,561
246
gh_patches_debug_3497
rasdani/github-patches
git_diff
ManimCommunity__manim-646
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The config value `html_favicon' has type `PosixPath'; expected `str'. BTW when building the docs now I get ``` WARNING: The config value `html_favicon' has type `PosixPath'; expected `str'. ``` _Originally posted by @leotrs in https://github.com/ManimCommunity/manim/pull/631#issuecomment-719075737_ </issue> <code> [start of docs/source/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 13 import os 14 import subprocess 15 import sys 16 from distutils.sysconfig import get_python_lib 17 from pathlib import Path 18 19 sys.path.insert(0, os.path.abspath(".")) 20 21 22 if os.environ.get("READTHEDOCS") == "True": 23 site_path = get_python_lib() 24 # bindings for pangocffi, cairocffi, pangocairocffi need to be generated 25 subprocess.run(["python", "pangocffi/ffi_build.py"], cwd=site_path) 26 subprocess.run(["python", "cairocffi/ffi_build.py"], cwd=site_path) 27 subprocess.run(["python", "pangocairocffi/ffi_build.py"], cwd=site_path) 28 # we need to add ffmpeg to the path 29 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries") 30 # the included binary is named ffmpeg-linux..., create a symlink 31 [ffmpeg_bin] = [ 32 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-") 33 ] 34 os.symlink( 35 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg") 36 ) 37 os.environ["PATH"] += os.pathsep + ffmpeg_path 38 39 40 # -- Project information ----------------------------------------------------- 41 42 project = "Manim" 43 copyright = "2020, The Manim Community Dev Team" 44 author = "The Manim Community Dev Team" 45 46 47 # -- General configuration --------------------------------------------------- 48 49 # Add any Sphinx extension module names here, as strings. They can be 50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 51 # ones. 52 extensions = [ 53 "sphinx.ext.autodoc", 54 "recommonmark", 55 "sphinx_copybutton", 56 "sphinx.ext.napoleon", 57 "sphinx.ext.autosummary", 58 "sphinx.ext.doctest", 59 "manim_directive", 60 ] 61 62 # Automatically generate stub pages when using the .. autosummary directive 63 autosummary_generate = True 64 65 # Add any paths that contain templates here, relative to this directory. 66 templates_path = ["_templates"] 67 68 # List of patterns, relative to source directory, that match files and 69 # directories to ignore when looking for source files. 70 # This pattern also affects html_static_path and html_extra_path. 71 exclude_patterns = [] 72 73 74 # -- Options for HTML output ------------------------------------------------- 75 76 # The theme to use for HTML and HTML Help pages. See the documentation for 77 # a list of builtin themes. 
78 # 79 import guzzle_sphinx_theme 80 81 html_theme_path = guzzle_sphinx_theme.html_theme_path() 82 html_theme = "guzzle_sphinx_theme" 83 html_favicon = Path("_static/favicon.ico") 84 85 # There's a standing issue with Sphinx's new-style sidebars. This is a 86 # workaround. Taken from 87 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826 88 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]} 89 90 # Register the theme as an extension to generate a sitemap.xml 91 extensions.append("guzzle_sphinx_theme") 92 93 # Add any paths that contain custom static files (such as style sheets) here, 94 # relative to this directory. They are copied after the builtin static files, 95 # so a file named "default.css" will overwrite the builtin "default.css". 96 html_static_path = ["_static"] 97 98 # This specifies any additional css files that will override the theme's 99 html_css_files = ["custom.css"] 100 [end of docs/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -80,7 +80,7 @@ html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = "guzzle_sphinx_theme" -html_favicon = Path("_static/favicon.ico") +html_favicon = str(Path("_static/favicon.ico")) # There's a standing issue with Sphinx's new-style sidebars. This is a # workaround. Taken from
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -80,7 +80,7 @@\n \n html_theme_path = guzzle_sphinx_theme.html_theme_path()\n html_theme = \"guzzle_sphinx_theme\"\n-html_favicon = Path(\"_static/favicon.ico\")\n+html_favicon = str(Path(\"_static/favicon.ico\"))\n \n # There's a standing issue with Sphinx's new-style sidebars. This is a\n # workaround. Taken from\n", "issue": "The config value `html_favicon' has type `PosixPath'; expected `str'.\nBTW when building the docs now I get\r\n```\r\nWARNING: The config value `html_favicon' has type `PosixPath'; expected `str'.\r\n```\r\n\r\n_Originally posted by @leotrs in https://github.com/ManimCommunity/manim/pull/631#issuecomment-719075737_\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = Path(\"_static/favicon.ico\")\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}]}
1,684
114
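The warning quoted in this record comes from Sphinx expecting a plain `str` for `html_favicon`, so the patch keeps the `Path` construction and simply wraps it in `str()`. A minimal standalone sketch of that coercion, using only the standard library; the `check_favicon` helper below is an illustrative stand-in, not Sphinx code:

```python
from pathlib import Path

def check_favicon(value):
    # Illustrative stand-in for the config check that produced the warning
    # quoted in the issue: anything that is not a str gets flagged.
    if not isinstance(value, str):
        print(f"WARNING: The config value `html_favicon' has type "
              f"`{type(value).__name__}'; expected `str'.")
    return value

check_favicon(Path("_static/favicon.ico"))       # emits the warning
check_favicon(str(Path("_static/favicon.ico")))  # patched form: passes silently
```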
gh_patches_debug_41816
rasdani/github-patches
git_diff
fonttools__fonttools-804
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [T2CharStringPen] Rounding coordinates? Currently, [T2CharStringPen](https://github.com/fonttools/fonttools/blob/fcd697a328b439165b3e9d04904eb73f065b6838/Lib/fontTools/pens/t2CharStringPen.py#L78) always rounds glyph coordinates and casts to int, but it does not seem a good idea. I think we should remove this behavior, or make it optional. cf. TTGlyphPen (#526) </issue> <code> [start of Lib/fontTools/pens/t2CharStringPen.py] 1 # Copyright (c) 2009 Type Supply LLC 2 # Author: Tal Leming 3 4 5 from __future__ import print_function, division, absolute_import 6 7 from fontTools.misc.py23 import * 8 from fontTools.misc.psCharStrings import T2CharString 9 from fontTools.pens.basePen import BasePen 10 11 12 def roundInt(v): 13 return int(round(v)) 14 15 16 def roundIntPoint(point): 17 x, y = point 18 return roundInt(x), roundInt(y) 19 20 21 class RelativeCoordinatePen(BasePen): 22 23 def __init__(self, glyphSet): 24 BasePen.__init__(self, glyphSet) 25 self._lastX = None 26 self._lastY = None 27 self._heldAbsoluteMove = None 28 29 def _makePointRelative(self, pt): 30 absX, absY = pt 31 absX = absX 32 absY = absY 33 # no points have been added 34 # so no conversion is needed 35 if self._lastX is None: 36 relX, relY = absX, absY 37 # otherwise calculate the relative coordinates 38 else: 39 relX = absX - self._lastX 40 relY = absY - self._lastY 41 # store the absolute coordinates 42 self._lastX = absX 43 self._lastY = absY 44 # now return the relative coordinates 45 return relX, relY 46 47 def _moveTo(self, pt): 48 self._heldAbsoluteMove = pt 49 50 def _releaseHeldMove(self): 51 if self._heldAbsoluteMove is not None: 52 pt = self._makePointRelative(self._heldAbsoluteMove) 53 self._relativeMoveTo(pt) 54 self._heldAbsoluteMove = None 55 56 def _relativeMoveTo(self, pt): 57 raise NotImplementedError 58 59 def _lineTo(self, pt): 60 self._releaseHeldMove() 61 pt = self._makePointRelative(pt) 62 self._relativeLineTo(pt) 63 64 def _relativeLineTo(self, pt): 65 raise NotImplementedError 66 67 def _curveToOne(self, pt1, pt2, pt3): 68 self._releaseHeldMove() 69 pt1 = self._makePointRelative(pt1) 70 pt2 = self._makePointRelative(pt2) 71 pt3 = self._makePointRelative(pt3) 72 self._relativeCurveToOne(pt1, pt2, pt3) 73 74 def _relativeCurveToOne(self, pt1, pt2, pt3): 75 raise NotImplementedError 76 77 78 class T2CharStringPen(RelativeCoordinatePen): 79 80 def __init__(self, width, glyphSet): 81 RelativeCoordinatePen.__init__(self, glyphSet) 82 self._heldMove = None 83 self._program = [] 84 if width is not None: 85 self._program.append(roundInt(width)) 86 87 def _moveTo(self, pt): 88 RelativeCoordinatePen._moveTo(self, roundIntPoint(pt)) 89 90 def _relativeMoveTo(self, pt): 91 pt = roundIntPoint(pt) 92 x, y = pt 93 self._heldMove = [x, y, "rmoveto"] 94 95 def _storeHeldMove(self): 96 if self._heldMove is not None: 97 self._program.extend(self._heldMove) 98 self._heldMove = None 99 100 def _lineTo(self, pt): 101 RelativeCoordinatePen._lineTo(self, roundIntPoint(pt)) 102 103 def _relativeLineTo(self, pt): 104 self._storeHeldMove() 105 pt = roundIntPoint(pt) 106 x, y = pt 107 self._program.extend([x, y, "rlineto"]) 108 109 def _curveToOne(self, pt1, pt2, pt3): 110 RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3)) 111 112 def _relativeCurveToOne(self, pt1, pt2, pt3): 113 self._storeHeldMove() 114 pt1 = roundIntPoint(pt1) 115 pt2 = roundIntPoint(pt2) 116 pt3 = 
roundIntPoint(pt3) 117 x1, y1 = pt1 118 x2, y2 = pt2 119 x3, y3 = pt3 120 self._program.extend([x1, y1, x2, y2, x3, y3, "rrcurveto"]) 121 122 def _closePath(self): 123 pass 124 125 def _endPath(self): 126 pass 127 128 def getCharString(self, private=None, globalSubrs=None): 129 program = self._program + ["endchar"] 130 charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs) 131 return charString 132 [end of Lib/fontTools/pens/t2CharStringPen.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Lib/fontTools/pens/t2CharStringPen.py b/Lib/fontTools/pens/t2CharStringPen.py --- a/Lib/fontTools/pens/t2CharStringPen.py +++ b/Lib/fontTools/pens/t2CharStringPen.py @@ -9,15 +9,6 @@ from fontTools.pens.basePen import BasePen -def roundInt(v): - return int(round(v)) - - -def roundIntPoint(point): - x, y = point - return roundInt(x), roundInt(y) - - class RelativeCoordinatePen(BasePen): def __init__(self, glyphSet): @@ -75,20 +66,45 @@ raise NotImplementedError +def makeRoundFunc(tolerance): + if tolerance < 0: + raise ValueError("Rounding tolerance must be positive") + + def _round(number): + if tolerance == 0: + return number # no-op + rounded = round(number) + # return rounded integer if the tolerance >= 0.5, or if the absolute + # difference between the original float and the rounded integer is + # within the tolerance + if tolerance >= .5 or abs(rounded - number) <= tolerance: + return rounded + else: + # else return the value un-rounded + return number + + def roundPoint(point): + x, y = point + return _round(x), _round(y) + + return roundPoint + + class T2CharStringPen(RelativeCoordinatePen): - def __init__(self, width, glyphSet): + def __init__(self, width, glyphSet, roundTolerance=0.5): RelativeCoordinatePen.__init__(self, glyphSet) + self.roundPoint = makeRoundFunc(roundTolerance) self._heldMove = None self._program = [] if width is not None: - self._program.append(roundInt(width)) + self._program.append(round(width)) def _moveTo(self, pt): - RelativeCoordinatePen._moveTo(self, roundIntPoint(pt)) + RelativeCoordinatePen._moveTo(self, self.roundPoint(pt)) def _relativeMoveTo(self, pt): - pt = roundIntPoint(pt) + pt = self.roundPoint(pt) x, y = pt self._heldMove = [x, y, "rmoveto"] @@ -98,22 +114,25 @@ self._heldMove = None def _lineTo(self, pt): - RelativeCoordinatePen._lineTo(self, roundIntPoint(pt)) + RelativeCoordinatePen._lineTo(self, self.roundPoint(pt)) def _relativeLineTo(self, pt): self._storeHeldMove() - pt = roundIntPoint(pt) + pt = self.roundPoint(pt) x, y = pt self._program.extend([x, y, "rlineto"]) def _curveToOne(self, pt1, pt2, pt3): - RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3)) + RelativeCoordinatePen._curveToOne(self, + self.roundPoint(pt1), + self.roundPoint(pt2), + self.roundPoint(pt3)) def _relativeCurveToOne(self, pt1, pt2, pt3): self._storeHeldMove() - pt1 = roundIntPoint(pt1) - pt2 = roundIntPoint(pt2) - pt3 = roundIntPoint(pt3) + pt1 = self.roundPoint(pt1) + pt2 = self.roundPoint(pt2) + pt3 = self.roundPoint(pt3) x1, y1 = pt1 x2, y2 = pt2 x3, y3 = pt3 @@ -127,5 +146,6 @@ def getCharString(self, private=None, globalSubrs=None): program = self._program + ["endchar"] - charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs) + charString = T2CharString( + program=program, private=private, globalSubrs=globalSubrs) return charString
{"golden_diff": "diff --git a/Lib/fontTools/pens/t2CharStringPen.py b/Lib/fontTools/pens/t2CharStringPen.py\n--- a/Lib/fontTools/pens/t2CharStringPen.py\n+++ b/Lib/fontTools/pens/t2CharStringPen.py\n@@ -9,15 +9,6 @@\n from fontTools.pens.basePen import BasePen\n \n \n-def roundInt(v):\n- return int(round(v))\n-\n-\n-def roundIntPoint(point):\n- x, y = point\n- return roundInt(x), roundInt(y)\n-\n-\n class RelativeCoordinatePen(BasePen):\n \n def __init__(self, glyphSet):\n@@ -75,20 +66,45 @@\n raise NotImplementedError\n \n \n+def makeRoundFunc(tolerance):\n+ if tolerance < 0:\n+ raise ValueError(\"Rounding tolerance must be positive\")\n+\n+ def _round(number):\n+ if tolerance == 0:\n+ return number # no-op\n+ rounded = round(number)\n+ # return rounded integer if the tolerance >= 0.5, or if the absolute\n+ # difference between the original float and the rounded integer is\n+ # within the tolerance\n+ if tolerance >= .5 or abs(rounded - number) <= tolerance:\n+ return rounded\n+ else:\n+ # else return the value un-rounded\n+ return number\n+\n+ def roundPoint(point):\n+ x, y = point\n+ return _round(x), _round(y)\n+\n+ return roundPoint\n+\n+\n class T2CharStringPen(RelativeCoordinatePen):\n \n- def __init__(self, width, glyphSet):\n+ def __init__(self, width, glyphSet, roundTolerance=0.5):\n RelativeCoordinatePen.__init__(self, glyphSet)\n+ self.roundPoint = makeRoundFunc(roundTolerance)\n self._heldMove = None\n self._program = []\n if width is not None:\n- self._program.append(roundInt(width))\n+ self._program.append(round(width))\n \n def _moveTo(self, pt):\n- RelativeCoordinatePen._moveTo(self, roundIntPoint(pt))\n+ RelativeCoordinatePen._moveTo(self, self.roundPoint(pt))\n \n def _relativeMoveTo(self, pt):\n- pt = roundIntPoint(pt)\n+ pt = self.roundPoint(pt)\n x, y = pt\n self._heldMove = [x, y, \"rmoveto\"]\n \n@@ -98,22 +114,25 @@\n self._heldMove = None\n \n def _lineTo(self, pt):\n- RelativeCoordinatePen._lineTo(self, roundIntPoint(pt))\n+ RelativeCoordinatePen._lineTo(self, self.roundPoint(pt))\n \n def _relativeLineTo(self, pt):\n self._storeHeldMove()\n- pt = roundIntPoint(pt)\n+ pt = self.roundPoint(pt)\n x, y = pt\n self._program.extend([x, y, \"rlineto\"])\n \n def _curveToOne(self, pt1, pt2, pt3):\n- RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3))\n+ RelativeCoordinatePen._curveToOne(self,\n+ self.roundPoint(pt1),\n+ self.roundPoint(pt2),\n+ self.roundPoint(pt3))\n \n def _relativeCurveToOne(self, pt1, pt2, pt3):\n self._storeHeldMove()\n- pt1 = roundIntPoint(pt1)\n- pt2 = roundIntPoint(pt2)\n- pt3 = roundIntPoint(pt3)\n+ pt1 = self.roundPoint(pt1)\n+ pt2 = self.roundPoint(pt2)\n+ pt3 = self.roundPoint(pt3)\n x1, y1 = pt1\n x2, y2 = pt2\n x3, y3 = pt3\n@@ -127,5 +146,6 @@\n \n def getCharString(self, private=None, globalSubrs=None):\n program = self._program + [\"endchar\"]\n- charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)\n+ charString = T2CharString(\n+ program=program, private=private, globalSubrs=globalSubrs)\n return charString\n", "issue": "[T2CharStringPen] Rounding coordinates?\nCurrently, [T2CharStringPen](https://github.com/fonttools/fonttools/blob/fcd697a328b439165b3e9d04904eb73f065b6838/Lib/fontTools/pens/t2CharStringPen.py#L78) always rounds glyph coordinates and casts to int, but it does not seem a good idea. I think we should remove this behavior, or make it optional.\r\n\r\ncf. 
TTGlyphPen (#526)\n", "before_files": [{"content": "# Copyright (c) 2009 Type Supply LLC\n# Author: Tal Leming\n\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc.psCharStrings import T2CharString\nfrom fontTools.pens.basePen import BasePen\n\n\ndef roundInt(v):\n return int(round(v))\n\n\ndef roundIntPoint(point):\n x, y = point\n return roundInt(x), roundInt(y)\n\n\nclass RelativeCoordinatePen(BasePen):\n\n def __init__(self, glyphSet):\n BasePen.__init__(self, glyphSet)\n self._lastX = None\n self._lastY = None\n self._heldAbsoluteMove = None\n\n def _makePointRelative(self, pt):\n absX, absY = pt\n absX = absX\n absY = absY\n # no points have been added\n # so no conversion is needed\n if self._lastX is None:\n relX, relY = absX, absY\n # otherwise calculate the relative coordinates\n else:\n relX = absX - self._lastX\n relY = absY - self._lastY\n # store the absolute coordinates\n self._lastX = absX\n self._lastY = absY\n # now return the relative coordinates\n return relX, relY\n\n def _moveTo(self, pt):\n self._heldAbsoluteMove = pt\n\n def _releaseHeldMove(self):\n if self._heldAbsoluteMove is not None:\n pt = self._makePointRelative(self._heldAbsoluteMove)\n self._relativeMoveTo(pt)\n self._heldAbsoluteMove = None\n\n def _relativeMoveTo(self, pt):\n raise NotImplementedError\n\n def _lineTo(self, pt):\n self._releaseHeldMove()\n pt = self._makePointRelative(pt)\n self._relativeLineTo(pt)\n\n def _relativeLineTo(self, pt):\n raise NotImplementedError\n\n def _curveToOne(self, pt1, pt2, pt3):\n self._releaseHeldMove()\n pt1 = self._makePointRelative(pt1)\n pt2 = self._makePointRelative(pt2)\n pt3 = self._makePointRelative(pt3)\n self._relativeCurveToOne(pt1, pt2, pt3)\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n raise NotImplementedError\n\n\nclass T2CharStringPen(RelativeCoordinatePen):\n\n def __init__(self, width, glyphSet):\n RelativeCoordinatePen.__init__(self, glyphSet)\n self._heldMove = None\n self._program = []\n if width is not None:\n self._program.append(roundInt(width))\n\n def _moveTo(self, pt):\n RelativeCoordinatePen._moveTo(self, roundIntPoint(pt))\n\n def _relativeMoveTo(self, pt):\n pt = roundIntPoint(pt)\n x, y = pt\n self._heldMove = [x, y, \"rmoveto\"]\n\n def _storeHeldMove(self):\n if self._heldMove is not None:\n self._program.extend(self._heldMove)\n self._heldMove = None\n\n def _lineTo(self, pt):\n RelativeCoordinatePen._lineTo(self, roundIntPoint(pt))\n\n def _relativeLineTo(self, pt):\n self._storeHeldMove()\n pt = roundIntPoint(pt)\n x, y = pt\n self._program.extend([x, y, \"rlineto\"])\n\n def _curveToOne(self, pt1, pt2, pt3):\n RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3))\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n self._storeHeldMove()\n pt1 = roundIntPoint(pt1)\n pt2 = roundIntPoint(pt2)\n pt3 = roundIntPoint(pt3)\n x1, y1 = pt1\n x2, y2 = pt2\n x3, y3 = pt3\n self._program.extend([x1, y1, x2, y2, x3, y3, \"rrcurveto\"])\n\n def _closePath(self):\n pass\n\n def _endPath(self):\n pass\n\n def getCharString(self, private=None, globalSubrs=None):\n program = self._program + [\"endchar\"]\n charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)\n return charString\n", "path": "Lib/fontTools/pens/t2CharStringPen.py"}]}
1,991
966
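The core of this patch is the tolerance-aware rounding helper that replaces the unconditional `roundInt`. A self-contained sketch of that helper, taken from the diff above (renamed to snake_case) so it can be tried outside fontTools:

```python
def make_round_func(tolerance):
    """Mirror of the makeRoundFunc helper added in the diff above."""
    if tolerance < 0:
        raise ValueError("Rounding tolerance must be positive")

    def _round(number):
        if tolerance == 0:
            return number            # rounding disabled: keep exact floats
        rounded = round(number)
        # Snap to the integer when tolerance >= 0.5 (the old behaviour) or
        # when the value is already within `tolerance` of an integer.
        if tolerance >= 0.5 or abs(rounded - number) <= tolerance:
            return rounded
        return number

    return _round

round_default = make_round_func(0.5)   # default: same result as the old pen
round_exact = make_round_func(0)       # opt out of rounding entirely
print(round_default(99.6), round_exact(99.6))   # -> 100 99.6
```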
gh_patches_debug_17604
rasdani/github-patches
git_diff
DataDog__dd-trace-py-4250
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot call sqlite3.backup(db) on a TracedSQLite object Thanks for taking the time for reporting an issue! Before reporting an issue on dd-trace-py, please be sure to provide all necessary information. If you're hitting a bug, make sure that you're using the latest version of this library. ### Which version of dd-trace-py are you using? 1.5.0 ### Which version of pip are you using? 21.1.1 _ddtrace requires pip>=18 to install one of our pre-built wheels_ ### Which version of the libraries are you using? You can copy/paste the output of `pip freeze` here. ``` ddtrace==1.5.0 ``` ### How can we reproduce your problem? ``` from ddtrace import config, patch_all import sqlite3 config.env = "test" # the environment the application is in config.service = "app" # name of your application config.version = "v1" # version of your application patch_all() src = sqlite3.connect("1.db") dst = sqlite3.connect("2.db") with dst: src.backup(dst, pages=1) dst.close() src.close() ``` ### What is the result that you get? The following TypeError ``` TypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite ``` ### What is the result that you expected? The function should succeed without error. </issue> <code> [start of ddtrace/contrib/sqlite3/patch.py] 1 import os 2 import sqlite3 3 import sqlite3.dbapi2 4 5 from ddtrace import config 6 from ddtrace.vendor import wrapt 7 8 from ...contrib.dbapi import FetchTracedCursor 9 from ...contrib.dbapi import TracedConnection 10 from ...contrib.dbapi import TracedCursor 11 from ...internal.utils.formats import asbool 12 from ...pin import Pin 13 14 15 # Original connect method 16 _connect = sqlite3.connect 17 18 config._add( 19 "sqlite", 20 dict( 21 _default_service="sqlite", 22 _dbapi_span_name_prefix="sqlite", 23 trace_fetch_methods=asbool(os.getenv("DD_SQLITE_TRACE_FETCH_METHODS", default=False)), 24 ), 25 ) 26 27 28 def patch(): 29 wrapped = wrapt.FunctionWrapper(_connect, traced_connect) 30 31 setattr(sqlite3, "connect", wrapped) 32 setattr(sqlite3.dbapi2, "connect", wrapped) 33 34 35 def unpatch(): 36 sqlite3.connect = _connect 37 sqlite3.dbapi2.connect = _connect 38 39 40 def traced_connect(func, _, args, kwargs): 41 conn = func(*args, **kwargs) 42 return patch_conn(conn) 43 44 45 def patch_conn(conn): 46 wrapped = TracedSQLite(conn) 47 Pin().onto(wrapped) 48 return wrapped 49 50 51 class TracedSQLiteCursor(TracedCursor): 52 def executemany(self, *args, **kwargs): 53 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 54 super(TracedSQLiteCursor, self).executemany(*args, **kwargs) 55 return self 56 57 def execute(self, *args, **kwargs): 58 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 59 super(TracedSQLiteCursor, self).execute(*args, **kwargs) 60 return self 61 62 63 class TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor): 64 pass 65 66 67 class TracedSQLite(TracedConnection): 68 def __init__(self, conn, pin=None, cursor_cls=None): 69 if not cursor_cls: 70 # Do not trace `fetch*` methods by default 71 cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor 72 73 super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls) 74 75 def execute(self, *args, **kwargs): 76 # sqlite has a few extra sugar functions 77 return self.cursor().execute(*args, **kwargs) 78 [end of ddtrace/contrib/sqlite3/patch.py] </code> I need 
you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py --- a/ddtrace/contrib/sqlite3/patch.py +++ b/ddtrace/contrib/sqlite3/patch.py @@ -1,6 +1,7 @@ import os import sqlite3 import sqlite3.dbapi2 +import sys from ddtrace import config from ddtrace.vendor import wrapt @@ -75,3 +76,13 @@ def execute(self, *args, **kwargs): # sqlite has a few extra sugar functions return self.cursor().execute(*args, **kwargs) + + # backup was added in Python 3.7 + if sys.version_info >= (3, 7, 0): + + def backup(self, target, *args, **kwargs): + # sqlite3 checks the type of `target`, it cannot be a wrapped connection + # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899 + if isinstance(target, TracedConnection): + target = target.__wrapped__ + return self.__wrapped__.backup(target, *args, **kwargs)
{"golden_diff": "diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py\n--- a/ddtrace/contrib/sqlite3/patch.py\n+++ b/ddtrace/contrib/sqlite3/patch.py\n@@ -1,6 +1,7 @@\n import os\n import sqlite3\n import sqlite3.dbapi2\n+import sys\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -75,3 +76,13 @@\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n+\n+ # backup was added in Python 3.7\n+ if sys.version_info >= (3, 7, 0):\n+\n+ def backup(self, target, *args, **kwargs):\n+ # sqlite3 checks the type of `target`, it cannot be a wrapped connection\n+ # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899\n+ if isinstance(target, TracedConnection):\n+ target = target.__wrapped__\n+ return self.__wrapped__.backup(target, *args, **kwargs)\n", "issue": "Cannot call sqlite3.backup(db) on a TracedSQLite object\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n\r\n### Which version of dd-trace-py are you using?\r\n1.5.0\r\n### Which version of pip are you using?\r\n21.1.1\r\n_ddtrace requires pip>=18 to install one of our pre-built wheels_\r\n\r\n### Which version of the libraries are you using?\r\n\r\nYou can copy/paste the output of `pip freeze` here.\r\n\r\n```\r\nddtrace==1.5.0\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n```\r\nfrom ddtrace import config, patch_all\r\nimport sqlite3\r\n\r\nconfig.env = \"test\" # the environment the application is in\r\nconfig.service = \"app\" # name of your application\r\nconfig.version = \"v1\" # version of your application\r\npatch_all()\r\n\r\nsrc = sqlite3.connect(\"1.db\")\r\ndst = sqlite3.connect(\"2.db\")\r\nwith dst:\r\n src.backup(dst, pages=1)\r\ndst.close()\r\nsrc.close()\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe following TypeError\r\n```\r\nTypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nThe function should succeed without error.\r\n\n", "before_files": [{"content": "import os\nimport sqlite3\nimport sqlite3.dbapi2\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...contrib.dbapi import FetchTracedCursor\nfrom ...contrib.dbapi import TracedConnection\nfrom ...contrib.dbapi import TracedCursor\nfrom ...internal.utils.formats import asbool\nfrom ...pin import Pin\n\n\n# Original connect method\n_connect = sqlite3.connect\n\nconfig._add(\n \"sqlite\",\n dict(\n _default_service=\"sqlite\",\n _dbapi_span_name_prefix=\"sqlite\",\n trace_fetch_methods=asbool(os.getenv(\"DD_SQLITE_TRACE_FETCH_METHODS\", default=False)),\n ),\n)\n\n\ndef patch():\n wrapped = wrapt.FunctionWrapper(_connect, traced_connect)\n\n setattr(sqlite3, \"connect\", wrapped)\n setattr(sqlite3.dbapi2, \"connect\", wrapped)\n\n\ndef unpatch():\n sqlite3.connect = _connect\n sqlite3.dbapi2.connect = _connect\n\n\ndef traced_connect(func, _, args, kwargs):\n conn = func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef patch_conn(conn):\n wrapped = TracedSQLite(conn)\n Pin().onto(wrapped)\n return wrapped\n\n\nclass TracedSQLiteCursor(TracedCursor):\n def executemany(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back 
the cursor instance\n super(TracedSQLiteCursor, self).executemany(*args, **kwargs)\n return self\n\n def execute(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).execute(*args, **kwargs)\n return self\n\n\nclass TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):\n pass\n\n\nclass TracedSQLite(TracedConnection):\n def __init__(self, conn, pin=None, cursor_cls=None):\n if not cursor_cls:\n # Do not trace `fetch*` methods by default\n cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor\n\n super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls)\n\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n", "path": "ddtrace/contrib/sqlite3/patch.py"}]}
1,541
308
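The fix works by unwrapping the traced target before handing it to `sqlite3`, because the C-level `backup()` type-checks its first argument. A runnable sketch of that unwrap-before-delegate pattern, with a toy proxy standing in for `TracedSQLite` (the `ConnectionProxy` class is illustrative, not ddtrace code):

```python
import sqlite3

class ConnectionProxy:
    """Toy tracing-style wrapper; only the delegation logic matters here."""

    def __init__(self, conn):
        self.__wrapped__ = conn

    def __getattr__(self, name):
        return getattr(self.__wrapped__, name)

    def backup(self, target, *args, **kwargs):
        # sqlite3 insists on a real Connection, so unwrap proxies first,
        # the same idea as the patched TracedSQLite.backup above.
        if isinstance(target, ConnectionProxy):
            target = target.__wrapped__
        return self.__wrapped__.backup(target, *args, **kwargs)

src = ConnectionProxy(sqlite3.connect(":memory:"))
dst = ConnectionProxy(sqlite3.connect(":memory:"))
src.execute("CREATE TABLE t (x INTEGER)")
src.backup(dst, pages=1)   # would raise TypeError without the unwrapping
```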
gh_patches_debug_7048
rasdani/github-patches
git_diff
airctic__icevision-821
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adding Non Unique Names To ClassMap Must Be Illegal Currently, `add_name` doesn't check if the added `name` is a duplicate or not. This should be illegal, as classes must be unique https://github.com/airctic/icevision/blob/82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4/icevision/core/class_map.py#L47-L51 </issue> <code> [start of icevision/core/class_map.py] 1 __all__ = ["ClassMap", "BACKGROUND"] 2 3 from icevision.imports import * 4 5 BACKGROUND = "background" 6 7 8 class ClassMap: 9 """Utility class for mapping between class name and id.""" 10 11 def __init__( 12 self, 13 classes: Optional[Sequence[str]] = None, 14 background: Optional[str] = BACKGROUND, 15 ): 16 self._lock = True 17 18 self._id2class = copy(list(classes)) if classes else [] 19 # insert background if required 20 self._background = background 21 if self._background is not None: 22 try: 23 self._id2class.remove(self._background) 24 except ValueError: 25 pass 26 # background is always index zero 27 self._id2class.insert(0, self._background) 28 29 self._class2id = {name: i for i, name in enumerate(self._id2class)} 30 31 @property 32 def num_classes(self): 33 return len(self) 34 35 def get_by_id(self, id: int) -> str: 36 return self._id2class[id] 37 38 def get_by_name(self, name: str) -> int: 39 try: 40 return self._class2id[name] 41 except KeyError as e: 42 if not self._lock: 43 return self.add_name(name) 44 else: 45 raise e 46 47 def add_name(self, name) -> int: 48 self._id2class.append(name) 49 id = len(self._class2id) 50 self._class2id[name] = id 51 return id 52 53 def lock(self): 54 self._lock = True 55 return self 56 57 def unlock(self): 58 self._lock = False 59 return self 60 61 def __eq__(self, other) -> bool: 62 if isinstance(other, ClassMap): 63 return self.__dict__ == other.__dict__ 64 return False 65 66 def __len__(self): 67 return len(self._id2class) 68 69 def __repr__(self): 70 return f"<ClassMap: {self._class2id.__repr__()}>" 71 [end of icevision/core/class_map.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py --- a/icevision/core/class_map.py +++ b/icevision/core/class_map.py @@ -44,7 +44,13 @@ else: raise e - def add_name(self, name) -> int: + def add_name(self, name: str) -> int: + # Raise error if trying to add duplicate value + if name in self._id2class: + raise ValueError( + f"'{name}' already exists in the ClassMap. You can only add new labels that are unique" + ) + self._id2class.append(name) id = len(self._class2id) self._class2id[name] = id
{"golden_diff": "diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py\n--- a/icevision/core/class_map.py\n+++ b/icevision/core/class_map.py\n@@ -44,7 +44,13 @@\n else:\n raise e\n \n- def add_name(self, name) -> int:\n+ def add_name(self, name: str) -> int:\n+ # Raise error if trying to add duplicate value\n+ if name in self._id2class:\n+ raise ValueError(\n+ f\"'{name}' already exists in the ClassMap. You can only add new labels that are unique\"\n+ )\n+\n self._id2class.append(name)\n id = len(self._class2id)\n self._class2id[name] = id\n", "issue": "Adding Non Unique Names To ClassMap Must Be Illegal\nCurrently, `add_name` doesn't check if the added `name` is a duplicate or not. This should be illegal, as classes must be unique\r\n\r\nhttps://github.com/airctic/icevision/blob/82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4/icevision/core/class_map.py#L47-L51\n", "before_files": [{"content": "__all__ = [\"ClassMap\", \"BACKGROUND\"]\n\nfrom icevision.imports import *\n\nBACKGROUND = \"background\"\n\n\nclass ClassMap:\n \"\"\"Utility class for mapping between class name and id.\"\"\"\n\n def __init__(\n self,\n classes: Optional[Sequence[str]] = None,\n background: Optional[str] = BACKGROUND,\n ):\n self._lock = True\n\n self._id2class = copy(list(classes)) if classes else []\n # insert background if required\n self._background = background\n if self._background is not None:\n try:\n self._id2class.remove(self._background)\n except ValueError:\n pass\n # background is always index zero\n self._id2class.insert(0, self._background)\n\n self._class2id = {name: i for i, name in enumerate(self._id2class)}\n\n @property\n def num_classes(self):\n return len(self)\n\n def get_by_id(self, id: int) -> str:\n return self._id2class[id]\n\n def get_by_name(self, name: str) -> int:\n try:\n return self._class2id[name]\n except KeyError as e:\n if not self._lock:\n return self.add_name(name)\n else:\n raise e\n\n def add_name(self, name) -> int:\n self._id2class.append(name)\n id = len(self._class2id)\n self._class2id[name] = id\n return id\n\n def lock(self):\n self._lock = True\n return self\n\n def unlock(self):\n self._lock = False\n return self\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ClassMap):\n return self.__dict__ == other.__dict__\n return False\n\n def __len__(self):\n return len(self._id2class)\n\n def __repr__(self):\n return f\"<ClassMap: {self._class2id.__repr__()}>\"\n", "path": "icevision/core/class_map.py"}]}
1,217
173
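A reduced, standalone sketch of the behaviour the patch enforces: adding a duplicate label now raises instead of silently corrupting the name-to-id mapping. `MiniClassMap` below is a trimmed illustration, not the real icevision `ClassMap`:

```python
class MiniClassMap:
    """Trimmed illustration of ClassMap.add_name after the patch."""

    def __init__(self, background="background"):
        self._id2class = [background]          # background is always index 0
        self._class2id = {background: 0}

    def add_name(self, name):
        if name in self._id2class:
            raise ValueError(
                f"'{name}' already exists in the ClassMap. "
                "You can only add new labels that are unique"
            )
        self._id2class.append(name)
        self._class2id[name] = len(self._id2class) - 1
        return self._class2id[name]

cm = MiniClassMap()
print(cm.add_name("cat"))        # 1
try:
    cm.add_name("cat")           # second insert of the same label
except ValueError as exc:
    print(exc)                   # '...already exists in the ClassMap...'
```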
gh_patches_debug_42527
rasdani/github-patches
git_diff
conan-io__conan-center-index-2077
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [request] cpp-taskflow/2.5.0 ### Package Details * Package Name/Version: **cpp-taskflow/2.5.0** * Changelog: **https://taskflow.github.io/taskflow/release-2-5-0.html** Please note that `cpp-taskflow` is renamed to `taskflow`. So new recipe shall be created. P.S. Can you tell me please, why recipes is not updated automatically, when new library version is released? Lask of automation? Will it be fixed in future conan center pipeline? </issue> <code> [start of recipes/taskflow/all/conanfile.py] 1 from conans import ConanFile, tools 2 from conans.errors import ConanInvalidConfiguration 3 from conans.model.version import Version 4 import os 5 6 7 class TaskflowConan(ConanFile): 8 name = "taskflow" 9 description = "A fast C++ header-only library to help you quickly write parallel programs with complex task dependencies." 10 topics = ("conan", "taskflow", "tasking", "parallelism") 11 url = "https://github.com/conan-io/conan-center-index" 12 homepage = "https://github.com/taskflow/taskflow" 13 license = "MIT" 14 15 no_copy_source = True 16 17 settings = "os", "compiler" 18 19 _source_subfolder = "source_subfolder" 20 21 def configure(self): 22 compiler = str(self.settings.compiler) 23 compiler_version = tools.Version(self.settings.compiler.version) 24 min_req_cppstd = "17" if tools.Version(self.version) <= "2.2.0" else "14" 25 26 if self.settings.compiler.cppstd: 27 tools.check_min_cppstd(self, min_req_cppstd) 28 else: 29 self.output.warn("%s recipe lacks information about the %s compiler" 30 " standard version support" % (self.name, compiler)) 31 32 minimal_version = { 33 "17" : { 34 "Visual Studio": "16", 35 "gcc": "7.3", 36 "clang": "6.0", 37 "apple-clang": "10.0" 38 }, 39 "14" : { 40 "Visual Studio": "15", 41 "gcc": "5", 42 "clang": "4.0", 43 "apple-clang": "8.0" 44 } 45 } 46 47 if compiler not in minimal_version[min_req_cppstd]: 48 self.output.info("%s requires a compiler that supports at least C++%s" % (self.name, min_req_cppstd)) 49 return 50 51 # Exclude compilers not supported by taskflow 52 if compiler_version < minimal_version[min_req_cppstd][compiler]: 53 raise ConanInvalidConfiguration("%s requires a compiler that supports" 54 " at least C++%s. %s %s is not" 55 " supported." % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value))) 56 57 def source(self): 58 tools.get(**self.conan_data["sources"][self.version]) 59 extracted_dir = self.name + "-" + self.version 60 os.rename(extracted_dir, self._source_subfolder) 61 62 def package(self): 63 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) 64 self.copy(pattern="*", dst="include/taskflow", src=os.path.join(self._source_subfolder, "taskflow")) 65 66 def package_id(self): 67 self.info.header_only() 68 69 def package_info(self): 70 if self.settings.os == "Linux": 71 self.cpp_info.system_libs.append("pthread") 72 if self.settings.compiler == "Visual Studio": 73 self.cpp_info.defines.append("_ENABLE_EXTENDED_ALIGNED_STORAGE") 74 75 [end of recipes/taskflow/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/taskflow/all/conanfile.py b/recipes/taskflow/all/conanfile.py --- a/recipes/taskflow/all/conanfile.py +++ b/recipes/taskflow/all/conanfile.py @@ -1,8 +1,7 @@ -from conans import ConanFile, tools -from conans.errors import ConanInvalidConfiguration -from conans.model.version import Version import os +from conans import ConanFile, tools +from conans.errors import ConanInvalidConfiguration class TaskflowConan(ConanFile): name = "taskflow" @@ -16,43 +15,41 @@ settings = "os", "compiler" - _source_subfolder = "source_subfolder" + @property + def _source_subfolder(self): + return "source_subfolder" def configure(self): - compiler = str(self.settings.compiler) - compiler_version = tools.Version(self.settings.compiler.version) - min_req_cppstd = "17" if tools.Version(self.version) <= "2.2.0" else "14" + minimal_cpp_standard = "17" if tools.Version(self.version) <= "2.2.0" else "14" if self.settings.compiler.cppstd: - tools.check_min_cppstd(self, min_req_cppstd) - else: - self.output.warn("%s recipe lacks information about the %s compiler" - " standard version support" % (self.name, compiler)) + tools.check_min_cppstd(self, minimal_cpp_standard) minimal_version = { - "17" : { + "17": { "Visual Studio": "16", "gcc": "7.3", "clang": "6.0", "apple-clang": "10.0" }, - "14" : { + "14": { "Visual Studio": "15", "gcc": "5", "clang": "4.0", "apple-clang": "8.0" } - } + }[minimal_cpp_standard] - if compiler not in minimal_version[min_req_cppstd]: - self.output.info("%s requires a compiler that supports at least C++%s" % (self.name, min_req_cppstd)) + compiler = str(self.settings.compiler) + if compiler not in minimal_version: + self.output.warn( + "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler)) + self.output.warn( + "%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard)) return - - # Exclude compilers not supported by taskflow - if compiler_version < minimal_version[min_req_cppstd][compiler]: - raise ConanInvalidConfiguration("%s requires a compiler that supports" - " at least C++%s. %s %s is not" - " supported." % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value))) + version = tools.Version(self.settings.compiler.version) + if version < minimal_version[compiler]: + raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard)) def source(self): tools.get(**self.conan_data["sources"][self.version]) @@ -61,7 +58,7 @@ def package(self): self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) - self.copy(pattern="*", dst="include/taskflow", src=os.path.join(self._source_subfolder, "taskflow")) + self.copy(pattern="*", dst=os.path.join("include", "taskflow"), src=os.path.join(self._source_subfolder, "taskflow")) def package_id(self): self.info.header_only() @@ -71,4 +68,5 @@ self.cpp_info.system_libs.append("pthread") if self.settings.compiler == "Visual Studio": self.cpp_info.defines.append("_ENABLE_EXTENDED_ALIGNED_STORAGE") - + self.cpp_info.names["cmake_find_package"] = "Taskflow" + self.cpp_info.names["cmake_find_package_multi"] = "Taskflow"
{"golden_diff": "diff --git a/recipes/taskflow/all/conanfile.py b/recipes/taskflow/all/conanfile.py\n--- a/recipes/taskflow/all/conanfile.py\n+++ b/recipes/taskflow/all/conanfile.py\n@@ -1,8 +1,7 @@\n-from conans import ConanFile, tools\n-from conans.errors import ConanInvalidConfiguration\n-from conans.model.version import Version\n import os\n \n+from conans import ConanFile, tools\n+from conans.errors import ConanInvalidConfiguration\n \n class TaskflowConan(ConanFile):\n name = \"taskflow\"\n@@ -16,43 +15,41 @@\n \n settings = \"os\", \"compiler\"\n \n- _source_subfolder = \"source_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def configure(self):\n- compiler = str(self.settings.compiler)\n- compiler_version = tools.Version(self.settings.compiler.version)\n- min_req_cppstd = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n+ minimal_cpp_standard = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n \n if self.settings.compiler.cppstd:\n- tools.check_min_cppstd(self, min_req_cppstd)\n- else:\n- self.output.warn(\"%s recipe lacks information about the %s compiler\"\n- \" standard version support\" % (self.name, compiler))\n+ tools.check_min_cppstd(self, minimal_cpp_standard)\n \n minimal_version = {\n- \"17\" : {\n+ \"17\": {\n \"Visual Studio\": \"16\",\n \"gcc\": \"7.3\",\n \"clang\": \"6.0\",\n \"apple-clang\": \"10.0\"\n },\n- \"14\" : {\n+ \"14\": {\n \"Visual Studio\": \"15\",\n \"gcc\": \"5\",\n \"clang\": \"4.0\",\n \"apple-clang\": \"8.0\"\n }\n- }\n+ }[minimal_cpp_standard]\n \n- if compiler not in minimal_version[min_req_cppstd]:\n- self.output.info(\"%s requires a compiler that supports at least C++%s\" % (self.name, min_req_cppstd))\n+ compiler = str(self.settings.compiler)\n+ if compiler not in minimal_version:\n+ self.output.warn(\n+ \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n+ self.output.warn(\n+ \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n-\n- # Exclude compilers not supported by taskflow\n- if compiler_version < minimal_version[min_req_cppstd][compiler]:\n- raise ConanInvalidConfiguration(\"%s requires a compiler that supports\"\n- \" at least C++%s. 
%s %s is not\"\n- \" supported.\" % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value)))\n+ version = tools.Version(self.settings.compiler.version)\n+ if version < minimal_version[compiler]:\n+ raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n@@ -61,7 +58,7 @@\n \n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n- self.copy(pattern=\"*\", dst=\"include/taskflow\", src=os.path.join(self._source_subfolder, \"taskflow\"))\n+ self.copy(pattern=\"*\", dst=os.path.join(\"include\", \"taskflow\"), src=os.path.join(self._source_subfolder, \"taskflow\"))\n \n def package_id(self):\n self.info.header_only()\n@@ -71,4 +68,5 @@\n self.cpp_info.system_libs.append(\"pthread\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n-\n+ self.cpp_info.names[\"cmake_find_package\"] = \"Taskflow\"\n+ self.cpp_info.names[\"cmake_find_package_multi\"] = \"Taskflow\"\n", "issue": "[request] cpp-taskflow/2.5.0\n### Package Details\r\n * Package Name/Version: **cpp-taskflow/2.5.0**\r\n * Changelog: **https://taskflow.github.io/taskflow/release-2-5-0.html**\r\n\r\n\r\nPlease note that `cpp-taskflow` is renamed to `taskflow`. So new recipe shall be created.\r\n\r\nP.S. Can you tell me please, why recipes is not updated automatically, when new library version is released? Lask of automation? Will it be fixed in future conan center pipeline?\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\nfrom conans.model.version import Version\nimport os\n\n\nclass TaskflowConan(ConanFile):\n name = \"taskflow\"\n description = \"A fast C++ header-only library to help you quickly write parallel programs with complex task dependencies.\"\n topics = (\"conan\", \"taskflow\", \"tasking\", \"parallelism\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/taskflow/taskflow\"\n license = \"MIT\"\n\n no_copy_source = True\n\n settings = \"os\", \"compiler\"\n\n _source_subfolder = \"source_subfolder\"\n\n def configure(self):\n compiler = str(self.settings.compiler)\n compiler_version = tools.Version(self.settings.compiler.version)\n min_req_cppstd = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, min_req_cppstd)\n else:\n self.output.warn(\"%s recipe lacks information about the %s compiler\"\n \" standard version support\" % (self.name, compiler))\n\n minimal_version = {\n \"17\" : {\n \"Visual Studio\": \"16\",\n \"gcc\": \"7.3\",\n \"clang\": \"6.0\",\n \"apple-clang\": \"10.0\"\n },\n \"14\" : {\n \"Visual Studio\": \"15\",\n \"gcc\": \"5\",\n \"clang\": \"4.0\",\n \"apple-clang\": \"8.0\"\n }\n }\n\n if compiler not in minimal_version[min_req_cppstd]:\n self.output.info(\"%s requires a compiler that supports at least C++%s\" % (self.name, min_req_cppstd))\n return\n\n # Exclude compilers not supported by taskflow\n if compiler_version < minimal_version[min_req_cppstd][compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports\"\n \" at least C++%s. 
%s %s is not\"\n \" supported.\" % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value)))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"*\", dst=\"include/taskflow\", src=os.path.join(self._source_subfolder, \"taskflow\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n\n", "path": "recipes/taskflow/all/conanfile.py"}]}
1,449
933
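The reworked `configure()` in this record boils down to a table lookup plus a version comparison against the minimum supported compiler. A toy sketch of that gate, with the comparison done on plain integer tuples rather than Conan's `tools.Version`; the function names here are invented for illustration and are not Conan API:

```python
MINIMAL_VERSION = {
    "17": {"Visual Studio": "16", "gcc": "7.3", "clang": "6.0", "apple-clang": "10.0"},
    "14": {"Visual Studio": "15", "gcc": "5", "clang": "4.0", "apple-clang": "8.0"},
}

def _as_tuple(version):
    return tuple(int(part) for part in str(version).split("."))

def compiler_supported(compiler, compiler_version, required_std="14"):
    table = MINIMAL_VERSION[required_std]
    if compiler not in table:
        return True   # unknown compiler: the recipe only warns in this case
    return _as_tuple(compiler_version) >= _as_tuple(table[compiler])

print(compiler_supported("gcc", "9.3"))   # True
print(compiler_supported("gcc", "4.8"))   # False -> ConanInvalidConfiguration
```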
gh_patches_debug_3808
rasdani/github-patches
git_diff
ManimCommunity__manim-2197
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create a test for resolution configuration ## Enhancement proposal A test for the `--resolution` configuration option. It should be placed [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/test_cli_flags.py) and [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/opengl/test_cli_flags_opengl.py) (the OpenGL test can be marked as `xfail` if setting resolution does not work properly on OpenGL). Test different resolutions as well as different ways of separating the resolutions (width and height separated by `;`, `,`, and `-`). ## Additional comments <!-- Add further context that you think might be relevant. --> </issue> <code> [start of manim/utils/commands.py] 1 import json 2 import os 3 from subprocess import run 4 from typing import Any, Dict, List 5 6 __all__ = [ 7 "capture", 8 "get_video_metadata", 9 "get_dir_layout", 10 ] 11 12 13 def capture(command, cwd=None, command_input=None): 14 p = run(command, cwd=cwd, input=command_input, capture_output=True, text=True) 15 out, err = p.stdout, p.stderr 16 return out, err, p.returncode 17 18 19 def get_video_metadata(path_to_video: str) -> Dict[str, Any]: 20 command = [ 21 "ffprobe", 22 "-v", 23 "error", 24 "-select_streams", 25 "v:0", 26 "-show_entries", 27 "stream=width,height,nb_frames,duration,avg_frame_rate,codec_name", 28 "-print_format", 29 "json", 30 path_to_video, 31 ] 32 config, err, exitcode = capture(command) 33 assert exitcode == 0, f"FFprobe error: {err}" 34 return json.loads(config)["streams"][0] 35 36 37 def get_dir_layout(dirpath: str) -> List[str]: 38 """Get list of paths relative to dirpath of all files in dir and subdirs recursively.""" 39 index_files: List[str] = [] 40 for root, dirs, files in os.walk(dirpath): 41 for file in files: 42 index_files.append(f"{os.path.relpath(os.path.join(root, file), dirpath)}") 43 return index_files 44 [end of manim/utils/commands.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/manim/utils/commands.py b/manim/utils/commands.py --- a/manim/utils/commands.py +++ b/manim/utils/commands.py @@ -27,7 +27,7 @@ "stream=width,height,nb_frames,duration,avg_frame_rate,codec_name", "-print_format", "json", - path_to_video, + str(path_to_video), ] config, err, exitcode = capture(command) assert exitcode == 0, f"FFprobe error: {err}"
{"golden_diff": "diff --git a/manim/utils/commands.py b/manim/utils/commands.py\n--- a/manim/utils/commands.py\n+++ b/manim/utils/commands.py\n@@ -27,7 +27,7 @@\n \"stream=width,height,nb_frames,duration,avg_frame_rate,codec_name\",\n \"-print_format\",\n \"json\",\n- path_to_video,\n+ str(path_to_video),\n ]\n config, err, exitcode = capture(command)\n assert exitcode == 0, f\"FFprobe error: {err}\"\n", "issue": "Create a test for resolution configuration\n## Enhancement proposal\r\nA test for the `--resolution` configuration option. It should be placed [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/test_cli_flags.py) and [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/opengl/test_cli_flags_opengl.py) (the OpenGL test can be marked as `xfail` if setting resolution does not work properly on OpenGL). Test different resolutions as well as different ways of separating the resolutions (width and height separated by `;`, `,`, and `-`).\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\n\n", "before_files": [{"content": "import json\nimport os\nfrom subprocess import run\nfrom typing import Any, Dict, List\n\n__all__ = [\n \"capture\",\n \"get_video_metadata\",\n \"get_dir_layout\",\n]\n\n\ndef capture(command, cwd=None, command_input=None):\n p = run(command, cwd=cwd, input=command_input, capture_output=True, text=True)\n out, err = p.stdout, p.stderr\n return out, err, p.returncode\n\n\ndef get_video_metadata(path_to_video: str) -> Dict[str, Any]:\n command = [\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-select_streams\",\n \"v:0\",\n \"-show_entries\",\n \"stream=width,height,nb_frames,duration,avg_frame_rate,codec_name\",\n \"-print_format\",\n \"json\",\n path_to_video,\n ]\n config, err, exitcode = capture(command)\n assert exitcode == 0, f\"FFprobe error: {err}\"\n return json.loads(config)[\"streams\"][0]\n\n\ndef get_dir_layout(dirpath: str) -> List[str]:\n \"\"\"Get list of paths relative to dirpath of all files in dir and subdirs recursively.\"\"\"\n index_files: List[str] = []\n for root, dirs, files in os.walk(dirpath):\n for file in files:\n index_files.append(f\"{os.path.relpath(os.path.join(root, file), dirpath)}\")\n return index_files\n", "path": "manim/utils/commands.py"}]}
1,077
120
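The golden diff here only coerces the ffprobe path argument to `str`; the test the issue asks for would exercise `--resolution` with the three separators it mentions. A hypothetical, self-contained shape for such a parametrised test; `parse_resolution` is invented for illustration and is not Manim's actual parser:

```python
import pytest

def parse_resolution(value):
    # Hypothetical helper: accept "W;H", "W,H" and "W-H", per the issue.
    for sep in (";", ",", "-"):
        if sep in value:
            width, height = value.split(sep)
            return int(width), int(height)
    raise ValueError(f"unrecognised resolution string: {value!r}")

@pytest.mark.parametrize("raw", ["854;480", "854,480", "854-480"])
def test_resolution_separators(raw):
    assert parse_resolution(raw) == (854, 480)
```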
gh_patches_debug_18067
rasdani/github-patches
git_diff
ivy-llc__ivy-13637
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> beta </issue> <code> [start of ivy/functional/frontends/jax/random.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.jax.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 handle_jax_dtype, 6 ) 7 8 9 @to_ivy_arrays_and_back 10 def PRNGKey(seed): 11 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64) 12 13 14 @handle_jax_dtype 15 @to_ivy_arrays_and_back 16 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0): 17 return ivy.random_uniform( 18 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]) 19 ) 20 21 22 @handle_jax_dtype 23 @to_ivy_arrays_and_back 24 def normal(key, shape=(), dtype=None): 25 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) 26 [end of ivy/functional/frontends/jax/random.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py --- a/ivy/functional/frontends/jax/random.py +++ b/ivy/functional/frontends/jax/random.py @@ -1,5 +1,6 @@ # local import ivy +from ivy.func_wrapper import with_unsupported_dtypes from ivy.functional.frontends.jax.func_wrapper import ( to_ivy_arrays_and_back, handle_jax_dtype, @@ -23,3 +24,24 @@ @to_ivy_arrays_and_back def normal(key, shape=(), dtype=None): return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) + + +def _get_seed(key): + key1, key2 = int(key[0]), int(key[1]) + return ivy.to_scalar(int("".join(map(str, [key1, key2])))) + + +@handle_jax_dtype +@to_ivy_arrays_and_back +@with_unsupported_dtypes( + { + "0.3.14 and below": ( + "float16", + "bfloat16", + ) + }, + "jax", +) +def beta(key, a, b, shape=None, dtype=None): + seed = _get_seed(key) + return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -1,5 +1,6 @@\n # local\n import ivy\n+from ivy.func_wrapper import with_unsupported_dtypes\n from ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n@@ -23,3 +24,24 @@\n @to_ivy_arrays_and_back\n def normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n+\n+\n+def _get_seed(key):\n+ key1, key2 = int(key[0]), int(key[1])\n+ return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n+\n+\n+@handle_jax_dtype\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes(\n+ {\n+ \"0.3.14 and below\": (\n+ \"float16\",\n+ \"bfloat16\",\n+ )\n+ },\n+ \"jax\",\n+)\n+def beta(key, a, b, shape=None, dtype=None):\n+ seed = _get_seed(key)\n+ return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n", "issue": "beta\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n", "path": "ivy/functional/frontends/jax/random.py"}]}
809
328
gh_patches_debug_28361
rasdani/github-patches
git_diff
getsentry__sentry-41334
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip-compile and CUSTOM_COMPILE_COMMAND ### Environment SaaS (https://sentry.io/) ### Version _No response_ ### Steps to Reproduce JFYI https://github.com/getsentry/sentry/blob/2e36d4d3a3926e9efe06287e59d27be1c40300fa/tools/freeze_requirements.py#L13 There is `CUSTOM_COMPILE_COMMAND` env to [customize](https://github.com/jazzband/pip-tools#configuration) the header: ``` $ CUSTOM_COMPILE_COMMAND="make freeze-requirements" pip-compile # # This file is autogenerated by pip-compile with python 3.11 # To update, run: # # make freeze-requirements # .... ``` ### Expected Result N/A ### Actual Result N/A </issue> <code> [start of tools/freeze_requirements.py] 1 from __future__ import annotations 2 3 import argparse 4 from concurrent.futures import Future, ThreadPoolExecutor 5 from os.path import abspath 6 from subprocess import CalledProcessError, run 7 from typing import Sequence 8 9 from tools.lib import gitroot 10 11 12 def worker(args: tuple[str, ...]) -> None: 13 # pip-compile doesn't let you customize the header, so we write 14 # one ourselves. However, pip-compile needs -o DEST otherwise 15 # it will bump >= pins even if they're satisfied. So, we need to 16 # unfortunately rewrite the whole file. 17 dest = args[-1] 18 try: 19 run(args, check=True, capture_output=True) 20 except CalledProcessError as e: 21 raise e 22 23 with open(dest, "rb+") as f: 24 content = f.read() 25 f.seek(0, 0) 26 f.write( 27 b"""# DO NOT MODIFY. This file was generated with `make freeze-requirements`. 28 29 """ 30 + content 31 ) 32 33 34 def check_futures(futures: list[Future[None]]) -> int: 35 rc = 0 36 for future in futures: 37 try: 38 future.result() 39 except CalledProcessError as e: 40 rc = 1 41 print( 42 f"""`{e.cmd}` returned code {e.returncode} 43 44 stdout: 45 {e.stdout.decode()} 46 47 stderr: 48 {e.stderr.decode()} 49 """ 50 ) 51 return rc 52 53 54 def main(argv: Sequence[str] | None = None) -> int: 55 parser = argparse.ArgumentParser() 56 parser.parse_args(argv) 57 58 base_path = abspath(gitroot()) 59 60 base_cmd = ( 61 "pip-compile", 62 "--allow-unsafe", 63 "--no-annotate", 64 "--no-header", 65 "--quiet", 66 "--strip-extras", 67 "--index-url=https://pypi.devinfra.sentry.io/simple", 68 ) 69 70 executor = ThreadPoolExecutor(max_workers=2) 71 futures = [ 72 executor.submit( 73 worker, 74 ( 75 *base_cmd, 76 f"{base_path}/requirements-base.txt", 77 f"{base_path}/requirements-getsentry.txt", 78 "-o", 79 f"{base_path}/requirements-frozen.txt", 80 ), 81 ), 82 executor.submit( 83 worker, 84 ( 85 *base_cmd, 86 f"{base_path}/requirements-base.txt", 87 f"{base_path}/requirements-getsentry.txt", 88 f"{base_path}/requirements-dev.txt", 89 "-o", 90 f"{base_path}/requirements-dev-frozen.txt", 91 ), 92 ), 93 ] 94 95 rc = check_futures(futures) 96 executor.shutdown() 97 return rc 98 99 100 if __name__ == "__main__": 101 raise SystemExit(main()) 102 [end of tools/freeze_requirements.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/freeze_requirements.py b/tools/freeze_requirements.py --- a/tools/freeze_requirements.py +++ b/tools/freeze_requirements.py @@ -1,6 +1,7 @@ from __future__ import annotations import argparse +import os from concurrent.futures import Future, ThreadPoolExecutor from os.path import abspath from subprocess import CalledProcessError, run @@ -10,25 +11,10 @@ def worker(args: tuple[str, ...]) -> None: - # pip-compile doesn't let you customize the header, so we write - # one ourselves. However, pip-compile needs -o DEST otherwise - # it will bump >= pins even if they're satisfied. So, we need to - # unfortunately rewrite the whole file. - dest = args[-1] - try: - run(args, check=True, capture_output=True) - except CalledProcessError as e: - raise e - - with open(dest, "rb+") as f: - content = f.read() - f.seek(0, 0) - f.write( - b"""# DO NOT MODIFY. This file was generated with `make freeze-requirements`. + env = os.environ.copy() + env["CUSTOM_COMPILE_COMMAND"] = "make freeze-requirements" -""" - + content - ) + run(args, check=True, capture_output=True, env=env) def check_futures(futures: list[Future[None]]) -> int: @@ -61,7 +47,6 @@ "pip-compile", "--allow-unsafe", "--no-annotate", - "--no-header", "--quiet", "--strip-extras", "--index-url=https://pypi.devinfra.sentry.io/simple",
{"golden_diff": "diff --git a/tools/freeze_requirements.py b/tools/freeze_requirements.py\n--- a/tools/freeze_requirements.py\n+++ b/tools/freeze_requirements.py\n@@ -1,6 +1,7 @@\n from __future__ import annotations\n \n import argparse\n+import os\n from concurrent.futures import Future, ThreadPoolExecutor\n from os.path import abspath\n from subprocess import CalledProcessError, run\n@@ -10,25 +11,10 @@\n \n \n def worker(args: tuple[str, ...]) -> None:\n- # pip-compile doesn't let you customize the header, so we write\n- # one ourselves. However, pip-compile needs -o DEST otherwise\n- # it will bump >= pins even if they're satisfied. So, we need to\n- # unfortunately rewrite the whole file.\n- dest = args[-1]\n- try:\n- run(args, check=True, capture_output=True)\n- except CalledProcessError as e:\n- raise e\n-\n- with open(dest, \"rb+\") as f:\n- content = f.read()\n- f.seek(0, 0)\n- f.write(\n- b\"\"\"# DO NOT MODIFY. This file was generated with `make freeze-requirements`.\n+ env = os.environ.copy()\n+ env[\"CUSTOM_COMPILE_COMMAND\"] = \"make freeze-requirements\"\n \n-\"\"\"\n- + content\n- )\n+ run(args, check=True, capture_output=True, env=env)\n \n \n def check_futures(futures: list[Future[None]]) -> int:\n@@ -61,7 +47,6 @@\n \"pip-compile\",\n \"--allow-unsafe\",\n \"--no-annotate\",\n- \"--no-header\",\n \"--quiet\",\n \"--strip-extras\",\n \"--index-url=https://pypi.devinfra.sentry.io/simple\",\n", "issue": "pip-compile and CUSTOM_COMPILE_COMMAND\n### Environment\r\n\r\nSaaS (https://sentry.io/)\r\n\r\n### Version\r\n\r\n_No response_\r\n\r\n### Steps to Reproduce\r\n\r\nJFYI\r\n\r\nhttps://github.com/getsentry/sentry/blob/2e36d4d3a3926e9efe06287e59d27be1c40300fa/tools/freeze_requirements.py#L13\r\n\r\nThere is `CUSTOM_COMPILE_COMMAND` env to [customize](https://github.com/jazzband/pip-tools#configuration) the header:\r\n\r\n\r\n```\r\n$ CUSTOM_COMPILE_COMMAND=\"make freeze-requirements\" pip-compile\r\n#\r\n# This file is autogenerated by pip-compile with python 3.11\r\n# To update, run:\r\n#\r\n# make freeze-requirements\r\n#\r\n....\r\n```\r\n\r\n### Expected Result\r\n\r\nN/A\r\n\r\n### Actual Result\r\n\r\nN/A\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom os.path import abspath\nfrom subprocess import CalledProcessError, run\nfrom typing import Sequence\n\nfrom tools.lib import gitroot\n\n\ndef worker(args: tuple[str, ...]) -> None:\n # pip-compile doesn't let you customize the header, so we write\n # one ourselves. However, pip-compile needs -o DEST otherwise\n # it will bump >= pins even if they're satisfied. So, we need to\n # unfortunately rewrite the whole file.\n dest = args[-1]\n try:\n run(args, check=True, capture_output=True)\n except CalledProcessError as e:\n raise e\n\n with open(dest, \"rb+\") as f:\n content = f.read()\n f.seek(0, 0)\n f.write(\n b\"\"\"# DO NOT MODIFY. 
This file was generated with `make freeze-requirements`.\n\n\"\"\"\n + content\n )\n\n\ndef check_futures(futures: list[Future[None]]) -> int:\n rc = 0\n for future in futures:\n try:\n future.result()\n except CalledProcessError as e:\n rc = 1\n print(\n f\"\"\"`{e.cmd}` returned code {e.returncode}\n\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}\n\"\"\"\n )\n return rc\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser()\n parser.parse_args(argv)\n\n base_path = abspath(gitroot())\n\n base_cmd = (\n \"pip-compile\",\n \"--allow-unsafe\",\n \"--no-annotate\",\n \"--no-header\",\n \"--quiet\",\n \"--strip-extras\",\n \"--index-url=https://pypi.devinfra.sentry.io/simple\",\n )\n\n executor = ThreadPoolExecutor(max_workers=2)\n futures = [\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n \"-o\",\n f\"{base_path}/requirements-frozen.txt\",\n ),\n ),\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n f\"{base_path}/requirements-dev.txt\",\n \"-o\",\n f\"{base_path}/requirements-dev-frozen.txt\",\n ),\n ),\n ]\n\n rc = check_futures(futures)\n executor.shutdown()\n return rc\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "tools/freeze_requirements.py"}]}
1,500
396
gh_patches_debug_9494
rasdani/github-patches
git_diff
quantopian__zipline-382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make ta-lib optional `ta-lib` is causing us many installation problems. We should thus make it optional. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # 3 # Copyright 2013 Quantopian, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 import sys 17 18 from setuptools import setup, find_packages 19 20 LONG_DESCRIPTION = None 21 README_MARKDOWN = None 22 23 with open('README.md') as markdown_source: 24 README_MARKDOWN = markdown_source.read() 25 26 if 'upload' in sys.argv: 27 # Converts the README.md file to ReST, since PyPI uses ReST for formatting, 28 # This allows to have one canonical README file, being the README.md 29 # The conversion only needs to be done on upload. 30 # Otherwise, the pandoc import and errors that are thrown when 31 # pandoc are both overhead and a source of confusion for general 32 # usage/installation. 33 import pandoc 34 pandoc.core.PANDOC_PATH = 'pandoc' 35 doc = pandoc.Document() 36 doc.markdown = README_MARKDOWN 37 LONG_DESCRIPTION = doc.rst 38 else: 39 # If pandoc isn't installed, e.g. when downloading from pip, 40 # just use the regular README. 41 LONG_DESCRIPTION = README_MARKDOWN 42 43 setup( 44 name='zipline', 45 version='0.7.0', 46 description='A backtester for financial algorithms.', 47 author='Quantopian Inc.', 48 author_email='[email protected]', 49 packages=find_packages(), 50 scripts=['scripts/run_algo.py'], 51 long_description=LONG_DESCRIPTION, 52 license='Apache 2.0', 53 classifiers=[ 54 'Development Status :: 4 - Beta', 55 'License :: OSI Approved :: Apache Software License', 56 'Natural Language :: English', 57 'Programming Language :: Python', 58 'Programming Language :: Python :: 2.7', 59 'Programming Language :: Python :: 3.3', 60 'Operating System :: OS Independent', 61 'Intended Audience :: Science/Research', 62 'Topic :: Office/Business :: Financial', 63 'Topic :: Scientific/Engineering :: Information Analysis', 64 'Topic :: System :: Distributed Computing', 65 ], 66 install_requires=[ 67 'Logbook', 68 'pytz', 69 'requests', 70 'numpy', 71 'pandas', 72 'six' 73 ], 74 url="https://github.com/quantopian/zipline" 75 ) 76 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Copyright 2013 Quantopian, Inc. +# Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -71,5 +71,8 @@ 'pandas', 'six' ], + extras_require = { + 'talib': ["talib"], + }, url="https://github.com/quantopian/zipline" )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,6 @@\n #!/usr/bin/env python\n #\n-# Copyright 2013 Quantopian, Inc.\n+# Copyright 2014 Quantopian, Inc.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -71,5 +71,8 @@\n 'pandas',\n 'six'\n ],\n+ extras_require = {\n+ 'talib': [\"talib\"],\n+ },\n url=\"https://github.com/quantopian/zipline\"\n )\n", "issue": "Make ta-lib optional\n`ta-lib` is causing us many installation problems. We should thus make it optional.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nfrom setuptools import setup, find_packages\n\nLONG_DESCRIPTION = None\nREADME_MARKDOWN = None\n\nwith open('README.md') as markdown_source:\n README_MARKDOWN = markdown_source.read()\n\nif 'upload' in sys.argv:\n # Converts the README.md file to ReST, since PyPI uses ReST for formatting,\n # This allows to have one canonical README file, being the README.md\n # The conversion only needs to be done on upload.\n # Otherwise, the pandoc import and errors that are thrown when\n # pandoc are both overhead and a source of confusion for general\n # usage/installation.\n import pandoc\n pandoc.core.PANDOC_PATH = 'pandoc'\n doc = pandoc.Document()\n doc.markdown = README_MARKDOWN\n LONG_DESCRIPTION = doc.rst\nelse:\n # If pandoc isn't installed, e.g. when downloading from pip,\n # just use the regular README.\n LONG_DESCRIPTION = README_MARKDOWN\n\nsetup(\n name='zipline',\n version='0.7.0',\n description='A backtester for financial algorithms.',\n author='Quantopian Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n scripts=['scripts/run_algo.py'],\n long_description=LONG_DESCRIPTION,\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: System :: Distributed Computing',\n ],\n install_requires=[\n 'Logbook',\n 'pytz',\n 'requests',\n 'numpy',\n 'pandas',\n 'six'\n ],\n url=\"https://github.com/quantopian/zipline\"\n)\n", "path": "setup.py"}]}
1,285
154
gh_patches_debug_3571
rasdani/github-patches
git_diff
nerfstudio-project__nerfstudio-2076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Doc Description Wrong Hello, I find python doc in [get_depth_image_from_path](https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/utils/data_utils.py) is wrong about the return tensor shape, it should be [height, width, 1] not [width, height, 1]. ![图片](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/e6acb5ee-4f66-44ad-b21e-1161b1df61b3) a simple verification: ![图片](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/dfd65f57-7f8b-44ab-897b-d06793c03375) </issue> <code> [start of nerfstudio/data/utils/data_utils.py] 1 # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Utility functions to allow easy re-use of common operations across dataloaders""" 16 from pathlib import Path 17 from typing import List, Tuple, Union 18 19 import cv2 20 import numpy as np 21 import torch 22 from PIL import Image 23 24 25 def get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor: 26 """ 27 Utility function to read a mask image from the given path and return a boolean tensor 28 """ 29 pil_mask = Image.open(filepath) 30 if scale_factor != 1.0: 31 width, height = pil_mask.size 32 newsize = (int(width * scale_factor), int(height * scale_factor)) 33 pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST) 34 mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool() 35 if len(mask_tensor.shape) != 3: 36 raise ValueError("The mask image should have 1 channel") 37 return mask_tensor 38 39 40 def get_semantics_and_mask_tensors_from_path( 41 filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0 42 ) -> Tuple[torch.Tensor, torch.Tensor]: 43 """ 44 Utility function to read segmentation from the given filepath 45 If no mask is required - use mask_indices = [] 46 """ 47 if isinstance(mask_indices, List): 48 mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1) 49 pil_image = Image.open(filepath) 50 if scale_factor != 1.0: 51 width, height = pil_image.size 52 newsize = (int(width * scale_factor), int(height * scale_factor)) 53 pil_image = pil_image.resize(newsize, resample=Image.NEAREST) 54 semantics = torch.from_numpy(np.array(pil_image, dtype="int64"))[..., None] 55 mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0 56 return semantics, mask 57 58 59 def get_depth_image_from_path( 60 filepath: Path, 61 height: int, 62 width: int, 63 scale_factor: float, 64 interpolation: int = cv2.INTER_NEAREST, 65 ) -> torch.Tensor: 66 """Loads, rescales and resizes depth images. 67 Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`. 68 69 Args: 70 filepath: Path to depth image. 71 height: Target depth image height. 72 width: Target depth image width. 
73 scale_factor: Factor by which to scale depth image. 74 interpolation: Depth value interpolation for resizing. 75 76 Returns: 77 Depth image torch tensor with shape [width, height, 1]. 78 """ 79 if filepath.suffix == ".npy": 80 image = np.load(filepath) * scale_factor 81 image = cv2.resize(image, (width, height), interpolation=interpolation) 82 else: 83 image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH) 84 image = image.astype(np.float64) * scale_factor 85 image = cv2.resize(image, (width, height), interpolation=interpolation) 86 return torch.from_numpy(image[:, :, np.newaxis]) 87 [end of nerfstudio/data/utils/data_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nerfstudio/data/utils/data_utils.py b/nerfstudio/data/utils/data_utils.py --- a/nerfstudio/data/utils/data_utils.py +++ b/nerfstudio/data/utils/data_utils.py @@ -74,7 +74,7 @@ interpolation: Depth value interpolation for resizing. Returns: - Depth image torch tensor with shape [width, height, 1]. + Depth image torch tensor with shape [height, width, 1]. """ if filepath.suffix == ".npy": image = np.load(filepath) * scale_factor
{"golden_diff": "diff --git a/nerfstudio/data/utils/data_utils.py b/nerfstudio/data/utils/data_utils.py\n--- a/nerfstudio/data/utils/data_utils.py\n+++ b/nerfstudio/data/utils/data_utils.py\n@@ -74,7 +74,7 @@\n interpolation: Depth value interpolation for resizing.\n \n Returns:\n- Depth image torch tensor with shape [width, height, 1].\n+ Depth image torch tensor with shape [height, width, 1].\n \"\"\"\n if filepath.suffix == \".npy\":\n image = np.load(filepath) * scale_factor\n", "issue": "Doc Description Wrong\nHello, \r\n\r\nI find python doc in [get_depth_image_from_path](https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/utils/data_utils.py) is wrong about the return tensor shape, \r\n\r\nit should be [height, width, 1] not [width, height, 1].\r\n\r\n![\u56fe\u7247](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/e6acb5ee-4f66-44ad-b21e-1161b1df61b3)\r\n\r\na simple verification:\r\n\r\n![\u56fe\u7247](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/dfd65f57-7f8b-44ab-897b-d06793c03375)\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions to allow easy re-use of common operations across dataloaders\"\"\"\nfrom pathlib import Path\nfrom typing import List, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\ndef get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor:\n \"\"\"\n Utility function to read a mask image from the given path and return a boolean tensor\n \"\"\"\n pil_mask = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_mask.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST)\n mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool()\n if len(mask_tensor.shape) != 3:\n raise ValueError(\"The mask image should have 1 channel\")\n return mask_tensor\n\n\ndef get_semantics_and_mask_tensors_from_path(\n filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Utility function to read segmentation from the given filepath\n If no mask is required - use mask_indices = []\n \"\"\"\n if isinstance(mask_indices, List):\n mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1)\n pil_image = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.NEAREST)\n semantics = torch.from_numpy(np.array(pil_image, dtype=\"int64\"))[..., None]\n mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0\n return semantics, mask\n\n\ndef get_depth_image_from_path(\n filepath: Path,\n height: int,\n width: int,\n 
scale_factor: float,\n interpolation: int = cv2.INTER_NEAREST,\n) -> torch.Tensor:\n \"\"\"Loads, rescales and resizes depth images.\n Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`.\n\n Args:\n filepath: Path to depth image.\n height: Target depth image height.\n width: Target depth image width.\n scale_factor: Factor by which to scale depth image.\n interpolation: Depth value interpolation for resizing.\n\n Returns:\n Depth image torch tensor with shape [width, height, 1].\n \"\"\"\n if filepath.suffix == \".npy\":\n image = np.load(filepath) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n else:\n image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH)\n image = image.astype(np.float64) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n return torch.from_numpy(image[:, :, np.newaxis])\n", "path": "nerfstudio/data/utils/data_utils.py"}]}
1,732
129
gh_patches_debug_9412
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-3099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> allow_remote=false does not prevent remote access ##### Steps to reproduce the problem: 1. Run mitmproxy on a publicly routable host, with default configuration including `listen_host=""`, `listen_port=8080`, and `allow_remote=false`. 2. From a host on a different network, send a request through that instance of mitmproxy, e.g. with `curl --proxy http://your-host.example:8080` The default `allow_remote=false` should prevent this request from succeeding. However, it is served by mitmproxy just fine. ##### Any other comments? What have you tried so far? I have a laptop sitting in the “DMZ” of a home router, which is globally IPv4 routable. I also have a VPS which is globally IPv4 routable. Both the laptop and the VPS are running Ubuntu 16.04 “Xenial Xerus”. I can reproduce the problem with mitmproxy running on the VPS and curl on the laptop, as well as vice-versa. Both tcpdump and mitmproxy’s own Details pane show the request as originating from a remote network. I only noticed this because I saw strange flows in a mitmproxy instance that I spun up on the laptop. ##### System information Mitmproxy: 3.0.3 binary Python: 3.5.2 OpenSSL: OpenSSL 1.1.0g 2 Nov 2017 Platform: Linux-4.4.0-116-generic-x86_64-with-debian-stretch-sid </issue> <code> [start of mitmproxy/addons/allowremote.py] 1 import ipaddress 2 from mitmproxy import ctx 3 4 5 class AllowRemote: 6 def load(self, loader): 7 loader.add_option( 8 "allow_remote", bool, False, 9 """ 10 Allow remote clients to connect to proxy. If set to false, 11 client will not be able to connect to proxy unless it is on the same network 12 or the proxyauth option is set 13 """ 14 ) 15 16 def clientconnect(self, layer): 17 address = layer.client_conn.address 18 19 accept_connection = ( 20 ctx.options.allow_remote or 21 ipaddress.ip_address(address[0]).is_private or 22 ctx.options.proxyauth is not None 23 ) 24 25 if not accept_connection: 26 layer.reply.kill() 27 ctx.log.warn("Client connection was killed because allow_remote option is set to false, " 28 "client IP was not a private IP and proxyauth was not set.\n" 29 "To allow remote connections set allow_remote option to true or set proxyauth option.") 30 [end of mitmproxy/addons/allowremote.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/addons/allowremote.py b/mitmproxy/addons/allowremote.py --- a/mitmproxy/addons/allowremote.py +++ b/mitmproxy/addons/allowremote.py @@ -14,11 +14,13 @@ ) def clientconnect(self, layer): - address = layer.client_conn.address + address = ipaddress.ip_address(layer.client_conn.address[0]) + if isinstance(address, ipaddress.IPv6Address): + address = address.ipv4_mapped or address accept_connection = ( ctx.options.allow_remote or - ipaddress.ip_address(address[0]).is_private or + ipaddress.ip_address(address).is_private or ctx.options.proxyauth is not None )
{"golden_diff": "diff --git a/mitmproxy/addons/allowremote.py b/mitmproxy/addons/allowremote.py\n--- a/mitmproxy/addons/allowremote.py\n+++ b/mitmproxy/addons/allowremote.py\n@@ -14,11 +14,13 @@\n )\n \n def clientconnect(self, layer):\n- address = layer.client_conn.address\n+ address = ipaddress.ip_address(layer.client_conn.address[0])\n+ if isinstance(address, ipaddress.IPv6Address):\n+ address = address.ipv4_mapped or address\n \n accept_connection = (\n ctx.options.allow_remote or\n- ipaddress.ip_address(address[0]).is_private or\n+ ipaddress.ip_address(address).is_private or\n ctx.options.proxyauth is not None\n )\n", "issue": "allow_remote=false does not prevent remote access\n##### Steps to reproduce the problem:\r\n\r\n1. Run mitmproxy on a publicly routable host, with default configuration including `listen_host=\"\"`, `listen_port=8080`, and `allow_remote=false`.\r\n2. From a host on a different network, send a request through that instance of mitmproxy, e.g. with `curl --proxy http://your-host.example:8080`\r\n\r\nThe default `allow_remote=false` should prevent this request from succeeding. However, it is served by mitmproxy just fine.\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI have a laptop sitting in the \u201cDMZ\u201d of a home router, which is globally IPv4 routable. I also have a VPS which is globally IPv4 routable. Both the laptop and the VPS are running Ubuntu 16.04 \u201cXenial Xerus\u201d. I can reproduce the problem with mitmproxy running on the VPS and curl on the laptop, as well as vice-versa.\r\n\r\nBoth tcpdump and mitmproxy\u2019s own Details pane show the request as originating from a remote network.\r\n\r\nI only noticed this because I saw strange flows in a mitmproxy instance that I spun up on the laptop.\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.3 binary\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\n\n", "before_files": [{"content": "import ipaddress\nfrom mitmproxy import ctx\n\n\nclass AllowRemote:\n def load(self, loader):\n loader.add_option(\n \"allow_remote\", bool, False,\n \"\"\"\n Allow remote clients to connect to proxy. If set to false,\n client will not be able to connect to proxy unless it is on the same network\n or the proxyauth option is set\n \"\"\"\n )\n\n def clientconnect(self, layer):\n address = layer.client_conn.address\n\n accept_connection = (\n ctx.options.allow_remote or\n ipaddress.ip_address(address[0]).is_private or\n ctx.options.proxyauth is not None\n )\n\n if not accept_connection:\n layer.reply.kill()\n ctx.log.warn(\"Client connection was killed because allow_remote option is set to false, \"\n \"client IP was not a private IP and proxyauth was not set.\\n\"\n \"To allow remote connections set allow_remote option to true or set proxyauth option.\")\n", "path": "mitmproxy/addons/allowremote.py"}]}
1,145
171
gh_patches_debug_10238
rasdani/github-patches
git_diff
quantumlib__Cirq-4003
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> optimized_for_sycamore fails when you measure more than 16 qubits ```python def thing(): q = cirq.LineQubit.range(17) c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z')) c = cg.optimized_for_sycamore(c) return c thing() ``` ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-44-accaace1183a> in <module> 4 c = cg.optimized_for_sycamore(c) 5 return c ----> 6 thing() <ipython-input-44-accaace1183a> in thing() 2 q = cirq.LineQubit.range(17) 3 c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z')) ----> 4 c = cg.optimized_for_sycamore(c) 5 return c 6 thing() ~/ecc/cirq/cirq/google/optimizers/optimize_for_sycamore.py in optimized_for_sycamore(circuit, new_device, qubit_map, optimizer_type, tolerance, tabulation_resolution) 159 opts = _OPTIMIZER_TYPES[optimizer_type](tolerance=tolerance, tabulation=tabulation) 160 for optimizer in opts: --> 161 optimizer(copy) 162 163 return circuits.Circuit( ~/ecc/cirq/cirq/optimizers/drop_negligible.py in optimize_circuit(self, circuit) 37 for moment_index, moment in enumerate(circuit): 38 for op in moment.operations: ---> 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: 40 deletions.append((moment_index, op)) 41 circuit.batch_remove(deletions) ~/ecc/cirq/cirq/protocols/trace_distance_bound.py in trace_distance_bound(val) 71 72 for strat in strats: ---> 73 result = strat(val) 74 if result is None: 75 break ~/ecc/cirq/cirq/protocols/trace_distance_bound.py in _strat_distance_from_unitary(val) 96 def _strat_distance_from_unitary(val: Any) -> Optional[float]: 97 """Attempts to compute a value's trace_distance_bound from its unitary.""" ---> 98 u = unitary_protocol.unitary(val, default=None) 99 100 if u is None: ~/ecc/cirq/cirq/protocols/unitary_protocol.py in unitary(val, default) 127 ] 128 for strat in strats: --> 129 result = strat(val) 130 if result is None: 131 break ~/ecc/cirq/cirq/protocols/unitary_protocol.py in _strat_unitary_from_apply_unitary(val) 173 174 # Apply unitary effect to an identity matrix. --> 175 state = qis.eye_tensor(val_qid_shape, dtype=np.complex128) 176 buffer = np.empty_like(state) 177 result = method(ApplyUnitaryArgs(state, buffer, range(len(val_qid_shape)))) ~/ecc/cirq/cirq/qis/states.py in eye_tensor(half_shape, dtype) 669 """ 670 identity = np.eye(np.prod(half_shape, dtype=int), dtype=dtype) --> 671 identity.shape = half_shape * 2 672 return identity ValueError: maximum supported dimension for an ndarray is 32, found 34 ``` </issue> <code> [start of cirq/optimizers/drop_negligible.py] 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """An optimization pass that removes operations with tiny effects.""" 16 17 from typing import List, Tuple, TYPE_CHECKING 18 19 from cirq import protocols 20 from cirq.circuits import circuit as _circuit 21 22 if TYPE_CHECKING: 23 from cirq import ops 24 25 26 class DropNegligible: 27 """An optimization pass that removes operations with tiny effects.""" 28 29 def __init__(self, tolerance: float = 1e-8) -> None: 30 self.tolerance = tolerance 31 32 def __call__(self, circuit: _circuit.Circuit): 33 self.optimize_circuit(circuit) 34 35 def optimize_circuit(self, circuit: _circuit.Circuit) -> None: 36 deletions: List[Tuple[int, ops.Operation]] = [] 37 for moment_index, moment in enumerate(circuit): 38 for op in moment.operations: 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: 40 deletions.append((moment_index, op)) 41 circuit.batch_remove(deletions) 42 [end of cirq/optimizers/drop_negligible.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cirq/optimizers/drop_negligible.py b/cirq/optimizers/drop_negligible.py --- a/cirq/optimizers/drop_negligible.py +++ b/cirq/optimizers/drop_negligible.py @@ -36,6 +36,8 @@ deletions: List[Tuple[int, ops.Operation]] = [] for moment_index, moment in enumerate(circuit): for op in moment.operations: - if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: + if protocols.is_measurement(op): + continue + if protocols.trace_distance_bound(op) <= self.tolerance: deletions.append((moment_index, op)) circuit.batch_remove(deletions)
{"golden_diff": "diff --git a/cirq/optimizers/drop_negligible.py b/cirq/optimizers/drop_negligible.py\n--- a/cirq/optimizers/drop_negligible.py\n+++ b/cirq/optimizers/drop_negligible.py\n@@ -36,6 +36,8 @@\n deletions: List[Tuple[int, ops.Operation]] = []\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n- if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\n+ if protocols.is_measurement(op):\n+ continue\n+ if protocols.trace_distance_bound(op) <= self.tolerance:\n deletions.append((moment_index, op))\n circuit.batch_remove(deletions)\n", "issue": "optimized_for_sycamore fails when you measure more than 16 qubits\n```python\r\ndef thing():\r\n q = cirq.LineQubit.range(17)\r\n c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z'))\r\n c = cg.optimized_for_sycamore(c)\r\n return c\r\nthing() \r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-44-accaace1183a> in <module>\r\n 4 c = cg.optimized_for_sycamore(c)\r\n 5 return c\r\n----> 6 thing()\r\n\r\n<ipython-input-44-accaace1183a> in thing()\r\n 2 q = cirq.LineQubit.range(17)\r\n 3 c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z'))\r\n----> 4 c = cg.optimized_for_sycamore(c)\r\n 5 return c\r\n 6 thing()\r\n\r\n~/ecc/cirq/cirq/google/optimizers/optimize_for_sycamore.py in optimized_for_sycamore(circuit, new_device, qubit_map, optimizer_type, tolerance, tabulation_resolution)\r\n 159 opts = _OPTIMIZER_TYPES[optimizer_type](tolerance=tolerance, tabulation=tabulation)\r\n 160 for optimizer in opts:\r\n--> 161 optimizer(copy)\r\n 162 \r\n 163 return circuits.Circuit(\r\n\r\n~/ecc/cirq/cirq/optimizers/drop_negligible.py in optimize_circuit(self, circuit)\r\n 37 for moment_index, moment in enumerate(circuit):\r\n 38 for op in moment.operations:\r\n---> 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\r\n 40 deletions.append((moment_index, op))\r\n 41 circuit.batch_remove(deletions)\r\n\r\n~/ecc/cirq/cirq/protocols/trace_distance_bound.py in trace_distance_bound(val)\r\n 71 \r\n 72 for strat in strats:\r\n---> 73 result = strat(val)\r\n 74 if result is None:\r\n 75 break\r\n\r\n~/ecc/cirq/cirq/protocols/trace_distance_bound.py in _strat_distance_from_unitary(val)\r\n 96 def _strat_distance_from_unitary(val: Any) -> Optional[float]:\r\n 97 \"\"\"Attempts to compute a value's trace_distance_bound from its unitary.\"\"\"\r\n---> 98 u = unitary_protocol.unitary(val, default=None)\r\n 99 \r\n 100 if u is None:\r\n\r\n~/ecc/cirq/cirq/protocols/unitary_protocol.py in unitary(val, default)\r\n 127 ]\r\n 128 for strat in strats:\r\n--> 129 result = strat(val)\r\n 130 if result is None:\r\n 131 break\r\n\r\n~/ecc/cirq/cirq/protocols/unitary_protocol.py in _strat_unitary_from_apply_unitary(val)\r\n 173 \r\n 174 # Apply unitary effect to an identity matrix.\r\n--> 175 state = qis.eye_tensor(val_qid_shape, dtype=np.complex128)\r\n 176 buffer = np.empty_like(state)\r\n 177 result = method(ApplyUnitaryArgs(state, buffer, range(len(val_qid_shape))))\r\n\r\n~/ecc/cirq/cirq/qis/states.py in eye_tensor(half_shape, dtype)\r\n 669 \"\"\"\r\n 670 identity = np.eye(np.prod(half_shape, dtype=int), dtype=dtype)\r\n--> 671 identity.shape = half_shape * 2\r\n 672 return identity\r\n\r\nValueError: maximum supported dimension for an ndarray is 32, found 34\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\nfrom typing import List, Tuple, TYPE_CHECKING\n\nfrom cirq import protocols\nfrom cirq.circuits import circuit as _circuit\n\nif TYPE_CHECKING:\n from cirq import ops\n\n\nclass DropNegligible:\n \"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\n def __init__(self, tolerance: float = 1e-8) -> None:\n self.tolerance = tolerance\n\n def __call__(self, circuit: _circuit.Circuit):\n self.optimize_circuit(circuit)\n\n def optimize_circuit(self, circuit: _circuit.Circuit) -> None:\n deletions: List[Tuple[int, ops.Operation]] = []\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\n deletions.append((moment_index, op))\n circuit.batch_remove(deletions)\n", "path": "cirq/optimizers/drop_negligible.py"}]}
1,856
164
gh_patches_debug_9405
rasdani/github-patches
git_diff
ivy-llc__ivy-17873
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dropout1d </issue> <code> [start of ivy/functional/frontends/torch/nn/functional/dropout_functions.py] 1 # local 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 5 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 6 7 8 @to_ivy_arrays_and_back 9 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 10 def dropout(input, p=0.5, training=True, inplace=False): 11 return ivy.dropout(input, p, training=training) 12 [end of ivy/functional/frontends/torch/nn/functional/dropout_functions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py --- a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py +++ b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py @@ -9,3 +9,11 @@ @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") def dropout(input, p=0.5, training=True, inplace=False): return ivy.dropout(input, p, training=training) + + +@to_ivy_arrays_and_back +@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") +def dropout1d(input, p=0.5, training=True, inplace=False): + if inplace: + return ivy.dropout1d(input, p, training=training, data_format="NCW", out=input) + return ivy.dropout1d(input, p, training=training, data_format="NCW")
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n--- a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n+++ b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n@@ -9,3 +9,11 @@\n @with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n def dropout(input, p=0.5, training=True, inplace=False):\n return ivy.dropout(input, p, training=training)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n+def dropout1d(input, p=0.5, training=True, inplace=False):\n+ if inplace:\n+ return ivy.dropout1d(input, p, training=training, data_format=\"NCW\", out=input)\n+ return ivy.dropout1d(input, p, training=training, data_format=\"NCW\")\n", "issue": "dropout1d\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef dropout(input, p=0.5, training=True, inplace=False):\n return ivy.dropout(input, p, training=training)\n", "path": "ivy/functional/frontends/torch/nn/functional/dropout_functions.py"}]}
680
252
gh_patches_debug_25242
rasdani/github-patches
git_diff
fedora-infra__bodhi-5479
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Updates sometimes get stuck in pending state From quick look, it seems that the composer does: 1. move from `f*-updates-candidate` to `f*-updates-testing` 2. do stuff 3. untag from `f*-updates-testing-pending` 4. mark update state as testing If the composer hang on 2 the update remains stuck in pending as the builds are not tagged anymore in `f*-updates-candidate`. We should find a solution. </issue> <code> [start of bodhi-server/bodhi/server/tasks/check_signed_builds.py] 1 # Copyright © 2017 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 19 """ 20 Avoid Updates being stuck in pending. 21 22 It may happen that Bodhi misses fedora-messaging messages announcing builds 23 have been signed. 24 In these cases, the Update remain stuck in pending until a manual intervention. 25 26 This script will cycle through builds of Updates in pending status and update 27 the signed status in the db to match the tags found in Koji. 
28 """ 29 30 import logging 31 from datetime import datetime, timedelta 32 33 from bodhi.server import buildsys, models 34 from bodhi.server.config import config 35 from bodhi.server.util import transactional_session_maker 36 37 38 log = logging.getLogger(__name__) 39 40 41 def main(): 42 """Check build tags and sign those we missed.""" 43 db_factory = transactional_session_maker() 44 older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay')) 45 with db_factory() as session: 46 updates = models.Update.query.filter( 47 models.Update.status == models.UpdateStatus.pending 48 ).filter( 49 models.Update.release_id == models.Release.id 50 ).filter( 51 models.Release.state.in_([ 52 models.ReleaseState.current, 53 models.ReleaseState.pending, 54 models.ReleaseState.frozen, 55 ]) 56 ).all() 57 58 if len(updates) == 0: 59 log.debug('No stuck Updates found') 60 return 61 62 kc = buildsys.get_session() 63 stuck_builds = [] 64 overlooked_builds = [] 65 66 for update in updates: 67 # Let Bodhi have its times 68 if update.date_submitted >= older_than: 69 continue 70 builds = update.builds 71 # Clean Updates with no builds 72 if len(builds) == 0: 73 log.debug(f'Obsoleting empty update {update.alias}') 74 update.obsolete(session) 75 session.flush() 76 continue 77 pending_signing_tag = update.release.pending_signing_tag 78 pending_testing_tag = update.release.pending_testing_tag 79 for build in builds: 80 if build.signed: 81 log.debug(f'{build.nvr} already marked as signed') 82 continue 83 build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] 84 if pending_signing_tag not in build_tags and pending_testing_tag in build_tags: 85 # Our composer missed the message that the build got signed 86 log.debug(f'Changing signed status of {build.nvr}') 87 build.signed = True 88 elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags: 89 # autosign missed the message that the build is waiting to be signed 90 log.debug(f'{build.nvr} is stuck waiting to be signed, let\'s try again') 91 stuck_builds.append((build.nvr, pending_signing_tag)) 92 elif (pending_signing_tag not in build_tags 93 and pending_testing_tag not in build_tags): 94 # this means that an update has been created but we never tagged the build 95 # as pending-signing 96 log.debug(f'Oh, no! We\'ve never sent {build.nvr} for signing, let\'s fix it') 97 overlooked_builds.append((build.nvr, pending_signing_tag)) 98 session.flush() 99 100 if stuck_builds: 101 kc.multicall = True 102 for b, t in stuck_builds: 103 kc.untagBuild(t, b, force=True) 104 kc.multiCall() 105 for b, t in stuck_builds: 106 kc.tagBuild(t, b, force=True) 107 kc.multiCall() 108 109 if overlooked_builds: 110 kc.multicall = True 111 for b, t in overlooked_builds: 112 kc.tagBuild(t, b, force=True) 113 kc.multiCall() 114 [end of bodhi-server/bodhi/server/tasks/check_signed_builds.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi-server/bodhi/server/tasks/check_signed_builds.py b/bodhi-server/bodhi/server/tasks/check_signed_builds.py --- a/bodhi-server/bodhi/server/tasks/check_signed_builds.py +++ b/bodhi-server/bodhi/server/tasks/check_signed_builds.py @@ -77,10 +77,16 @@ pending_signing_tag = update.release.pending_signing_tag pending_testing_tag = update.release.pending_testing_tag for build in builds: + build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] if build.signed: log.debug(f'{build.nvr} already marked as signed') + if (update.release.testing_tag in build_tags + and update.release.candidate_tag not in build_tags): + # The update was probably ejected from a compose and is stuck + log.debug(f'Resubmitting {update.alias} to testing') + update.set_request(session, models.UpdateRequest.testing, 'bodhi') + break continue - build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] if pending_signing_tag not in build_tags and pending_testing_tag in build_tags: # Our composer missed the message that the build got signed log.debug(f'Changing signed status of {build.nvr}')
{"golden_diff": "diff --git a/bodhi-server/bodhi/server/tasks/check_signed_builds.py b/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n--- a/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n+++ b/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n@@ -77,10 +77,16 @@\n pending_signing_tag = update.release.pending_signing_tag\n pending_testing_tag = update.release.pending_testing_tag\n for build in builds:\n+ build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if build.signed:\n log.debug(f'{build.nvr} already marked as signed')\n+ if (update.release.testing_tag in build_tags\n+ and update.release.candidate_tag not in build_tags):\n+ # The update was probably ejected from a compose and is stuck\n+ log.debug(f'Resubmitting {update.alias} to testing')\n+ update.set_request(session, models.UpdateRequest.testing, 'bodhi')\n+ break\n continue\n- build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if pending_signing_tag not in build_tags and pending_testing_tag in build_tags:\n # Our composer missed the message that the build got signed\n log.debug(f'Changing signed status of {build.nvr}')\n", "issue": "Updates sometimes get stuck in pending state\nFrom quick look, it seems that the composer does:\r\n1. move from `f*-updates-candidate` to `f*-updates-testing`\r\n2. do stuff\r\n3. untag from `f*-updates-testing-pending`\r\n4. mark update state as testing\r\n\r\nIf the composer hang on 2 the update remains stuck in pending as the builds are not tagged anymore in `f*-updates-candidate`. We should find a solution.\n", "before_files": [{"content": "# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nAvoid Updates being stuck in pending.\n\nIt may happen that Bodhi misses fedora-messaging messages announcing builds\nhave been signed.\nIn these cases, the Update remain stuck in pending until a manual intervention.\n\nThis script will cycle through builds of Updates in pending status and update\nthe signed status in the db to match the tags found in Koji.\n\"\"\"\n\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom bodhi.server import buildsys, models\nfrom bodhi.server.config import config\nfrom bodhi.server.util import transactional_session_maker\n\n\nlog = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"Check build tags and sign those we missed.\"\"\"\n db_factory = transactional_session_maker()\n older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay'))\n with db_factory() as session:\n updates = models.Update.query.filter(\n models.Update.status == models.UpdateStatus.pending\n ).filter(\n models.Update.release_id == models.Release.id\n ).filter(\n models.Release.state.in_([\n models.ReleaseState.current,\n models.ReleaseState.pending,\n models.ReleaseState.frozen,\n ])\n ).all()\n\n if len(updates) == 0:\n log.debug('No stuck Updates found')\n return\n\n kc = buildsys.get_session()\n stuck_builds = []\n overlooked_builds = []\n\n for update in updates:\n # Let Bodhi have its times\n if update.date_submitted >= older_than:\n continue\n builds = update.builds\n # Clean Updates with no builds\n if len(builds) == 0:\n log.debug(f'Obsoleting empty update {update.alias}')\n update.obsolete(session)\n session.flush()\n continue\n pending_signing_tag = update.release.pending_signing_tag\n pending_testing_tag = update.release.pending_testing_tag\n for build in builds:\n if build.signed:\n log.debug(f'{build.nvr} already marked as signed')\n continue\n build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if pending_signing_tag not in build_tags and pending_testing_tag in build_tags:\n # Our composer missed the message that the build got signed\n log.debug(f'Changing signed status of {build.nvr}')\n build.signed = True\n elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags:\n # autosign missed the message that the build is waiting to be signed\n log.debug(f'{build.nvr} is stuck waiting to be signed, let\\'s try again')\n stuck_builds.append((build.nvr, pending_signing_tag))\n elif (pending_signing_tag not in build_tags\n and pending_testing_tag not in build_tags):\n # this means that an update has been created but we never tagged the build\n # as pending-signing\n log.debug(f'Oh, no! We\\'ve never sent {build.nvr} for signing, let\\'s fix it')\n overlooked_builds.append((build.nvr, pending_signing_tag))\n session.flush()\n\n if stuck_builds:\n kc.multicall = True\n for b, t in stuck_builds:\n kc.untagBuild(t, b, force=True)\n kc.multiCall()\n for b, t in stuck_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n\n if overlooked_builds:\n kc.multicall = True\n for b, t in overlooked_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n", "path": "bodhi-server/bodhi/server/tasks/check_signed_builds.py"}]}
num_tokens_prompt: 1,845
num_tokens_diff: 301
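The stuck-pending scenario in the Bodhi record above reduces to a tag-state check: a signed build that already carries the release's testing tag but is no longer in the candidate tag was most likely picked up by a compose that never finished, so the update should be resubmitted to testing. The snippet below only illustrates that predicate with made-up tag names and a hypothetical helper; it is not the actual Bodhi code.

```python
# Hypothetical helper illustrating the detection logic; tag names are placeholders.
def looks_ejected_from_compose(build_tags, candidate_tag, testing_tag):
    """True when a build matches the stuck-in-pending pattern described above."""
    return testing_tag in build_tags and candidate_tag not in build_tags


# A build the composer moved to testing but whose update was never marked testing:
print(looks_ejected_from_compose(
    build_tags=["f39-updates-testing", "f39-updates-testing-pending"],
    candidate_tag="f39-updates-candidate",
    testing_tag="f39-updates-testing",
))  # -> True, so the update would be resubmitted to testing
```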
problem_id: gh_patches_debug_10905
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-1775
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improving product variant behaviour ### Steps to reproduce the problem 1. Choose any product in dashboard 2. Remove all variants 3. Try to go to the product page in storefront ### What I expected to happen Product page in storefront with "UNAVAILABLE" label. ### What happened instead/how it failed 500 code. It is getting crashed with `list index out of range` error, because in method `update_field_data` in `VariantChoiceField` lines ``` if self.queryset.count() < 2: self.widget = forms.HiddenInput( {'value': variants.all()[0].pk}) ``` are trying to get index of empty query set. Possible solution would be to omit this block of code if `variants.all()` is `False`. </issue> <code> [start of saleor/product/forms.py] 1 import json 2 3 from django import forms 4 from django.utils.encoding import smart_text 5 from django.utils.translation import pgettext_lazy 6 from django_prices.templatetags.prices_i18n import gross 7 8 from ..cart.forms import AddToCartForm 9 10 11 class VariantChoiceField(forms.ModelChoiceField): 12 discounts = None 13 14 def label_from_instance(self, obj): 15 variant_label = smart_text(obj) 16 label = pgettext_lazy( 17 'Variant choice field label', 18 '%(variant_label)s - %(price)s') % { 19 'variant_label': variant_label, 20 'price': gross( 21 obj.get_price_per_item(discounts=self.discounts))} 22 return label 23 24 def update_field_data(self, variants, cart): 25 """Initialize variant picker metadata.""" 26 self.queryset = variants 27 self.discounts = cart.discounts 28 self.empty_label = None 29 images_map = { 30 variant.pk: [ 31 vi.image.image.url for vi in variant.variant_images.all()] 32 for variant in variants.all()} 33 self.widget.attrs['data-images'] = json.dumps(images_map) 34 # Don't display select input if there are less than two variants 35 if self.queryset.count() < 2: 36 self.widget = forms.HiddenInput( 37 {'value': variants.all()[0].pk}) 38 39 40 class ProductForm(AddToCartForm): 41 variant = VariantChoiceField(queryset=None) 42 43 def __init__(self, *args, **kwargs): 44 super().__init__(*args, **kwargs) 45 variant_field = self.fields['variant'] 46 variant_field.update_field_data(self.product.variants, self.cart) 47 48 def get_variant(self, cleaned_data): 49 return cleaned_data.get('variant') 50 [end of saleor/product/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/product/forms.py b/saleor/product/forms.py --- a/saleor/product/forms.py +++ b/saleor/product/forms.py @@ -31,8 +31,8 @@ vi.image.image.url for vi in variant.variant_images.all()] for variant in variants.all()} self.widget.attrs['data-images'] = json.dumps(images_map) - # Don't display select input if there are less than two variants - if self.queryset.count() < 2: + # Don't display select input if there is only one variant. + if self.queryset.count() == 1: self.widget = forms.HiddenInput( {'value': variants.all()[0].pk})
{"golden_diff": "diff --git a/saleor/product/forms.py b/saleor/product/forms.py\n--- a/saleor/product/forms.py\n+++ b/saleor/product/forms.py\n@@ -31,8 +31,8 @@\n vi.image.image.url for vi in variant.variant_images.all()]\n for variant in variants.all()}\n self.widget.attrs['data-images'] = json.dumps(images_map)\n- # Don't display select input if there are less than two variants\n- if self.queryset.count() < 2:\n+ # Don't display select input if there is only one variant.\n+ if self.queryset.count() == 1:\n self.widget = forms.HiddenInput(\n {'value': variants.all()[0].pk})\n", "issue": "Improving product variant behaviour\n### Steps to reproduce the problem\r\n\r\n1. Choose any product in dashboard\r\n2. Remove all variants\r\n3. Try to go to the product page in storefront\r\n\r\n### What I expected to happen\r\n\r\nProduct page in storefront with \"UNAVAILABLE\" label.\r\n\r\n### What happened instead/how it failed\r\n500 code. \r\n\r\n\r\nIt is getting crashed with `list index out of range` error, because in method `update_field_data` in `VariantChoiceField` lines\r\n```\r\n if self.queryset.count() < 2:\r\n self.widget = forms.HiddenInput(\r\n {'value': variants.all()[0].pk})\r\n```\r\nare trying to get index of empty query set.\r\nPossible solution would be to omit this block of code if `variants.all()` is `False`.\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\nfrom django_prices.templatetags.prices_i18n import gross\n\nfrom ..cart.forms import AddToCartForm\n\n\nclass VariantChoiceField(forms.ModelChoiceField):\n discounts = None\n\n def label_from_instance(self, obj):\n variant_label = smart_text(obj)\n label = pgettext_lazy(\n 'Variant choice field label',\n '%(variant_label)s - %(price)s') % {\n 'variant_label': variant_label,\n 'price': gross(\n obj.get_price_per_item(discounts=self.discounts))}\n return label\n\n def update_field_data(self, variants, cart):\n \"\"\"Initialize variant picker metadata.\"\"\"\n self.queryset = variants\n self.discounts = cart.discounts\n self.empty_label = None\n images_map = {\n variant.pk: [\n vi.image.image.url for vi in variant.variant_images.all()]\n for variant in variants.all()}\n self.widget.attrs['data-images'] = json.dumps(images_map)\n # Don't display select input if there are less than two variants\n if self.queryset.count() < 2:\n self.widget = forms.HiddenInput(\n {'value': variants.all()[0].pk})\n\n\nclass ProductForm(AddToCartForm):\n variant = VariantChoiceField(queryset=None)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n variant_field = self.fields['variant']\n variant_field.update_field_data(self.product.variants, self.cart)\n\n def get_variant(self, cleaned_data):\n return cleaned_data.get('variant')\n", "path": "saleor/product/forms.py"}]}
num_tokens_prompt: 1,154
num_tokens_diff: 157
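The Saleor crash above comes from reading "fewer than two variants" as "exactly one variant": with an empty queryset, `variants.all()[0]` raises `IndexError`. A minimal, framework-free sketch of the corrected guard (plain lists stand in for Django querysets; the function name is made up):

```python
# Hypothetical sketch: only collapse the picker to a hidden input when exactly
# one variant exists; zero variants must keep a (possibly empty) select widget.
def pick_variant_widget(variant_pks):
    if len(variant_pks) == 1:
        return {"widget": "hidden", "value": variant_pks[0]}
    return {"widget": "select", "choices": list(variant_pks)}


print(pick_variant_widget([]))         # no variants: no crash, empty select
print(pick_variant_widget([42]))       # one variant: hidden input
print(pick_variant_widget([1, 2, 3]))  # several variants: normal select
```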
problem_id: gh_patches_debug_6255
source: rasdani/github-patches
task_type: git_diff
in_source_id: qtile__qtile-180
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> using Qtile.cmd_{info, debug} doesn't switch logging level It seems that critical, warning, and error do all work, though. </issue> <code> [start of libqtile/log_utils.py] 1 import logging 2 import os 3 import sys 4 from logging import getLogger, StreamHandler 5 6 7 class ColorFormatter(logging.Formatter): 8 """Logging formatter adding console colors to the output. 9 """ 10 black, red, green, yellow, blue, magenta, cyan, white = range(8) 11 colors = { 12 'WARNING': yellow, 13 'INFO': green, 14 'DEBUG': blue, 15 'CRITICAL': yellow, 16 'ERROR': red, 17 'RED': red, 18 'GREEN': green, 19 'YELLOW': yellow, 20 'BLUE': blue, 21 'MAGENTA': magenta, 22 'CYAN': cyan, 23 'WHITE': white} 24 reset_seq = '\033[0m' 25 color_seq = '\033[%dm' 26 bold_seq = '\033[1m' 27 28 def format(self, record): 29 """Format the record with colors.""" 30 color = self.color_seq % (30 + self.colors[record.levelname]) 31 message = logging.Formatter.format(self, record) 32 message = message.replace('$RESET', self.reset_seq)\ 33 .replace('$BOLD', self.bold_seq)\ 34 .replace('$COLOR', color) 35 for color, value in self.colors.items(): 36 message = message.replace( 37 '$' + color, self.color_seq % (value + 30))\ 38 .replace('$BG' + color, self.color_seq % (value + 40))\ 39 .replace('$BG-' + color, self.color_seq % (value + 40)) 40 return message + self.reset_seq 41 42 43 def init_log(log_level=logging.WARNING, logger='qtile'): 44 handler = logging.FileHandler( 45 os.path.expanduser('~/.%s.log' % logger)) 46 handler.setLevel(logging.WARNING) 47 handler.setFormatter( 48 logging.Formatter( 49 "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s")) 50 log = getLogger(logger) 51 log.setLevel(log_level) 52 log.addHandler(handler) 53 log.warning('Starting %s' % logger.title()) 54 handler = StreamHandler(sys.stderr) 55 handler.setFormatter( 56 ColorFormatter( 57 '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s' 58 ' %(funcName)s:%(lineno)d $RESET %(message)s')) 59 log.addHandler(handler) 60 return log 61 [end of libqtile/log_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/log_utils.py b/libqtile/log_utils.py --- a/libqtile/log_utils.py +++ b/libqtile/log_utils.py @@ -43,7 +43,6 @@ def init_log(log_level=logging.WARNING, logger='qtile'): handler = logging.FileHandler( os.path.expanduser('~/.%s.log' % logger)) - handler.setLevel(logging.WARNING) handler.setFormatter( logging.Formatter( "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s"))
{"golden_diff": "diff --git a/libqtile/log_utils.py b/libqtile/log_utils.py\n--- a/libqtile/log_utils.py\n+++ b/libqtile/log_utils.py\n@@ -43,7 +43,6 @@\n def init_log(log_level=logging.WARNING, logger='qtile'):\n handler = logging.FileHandler(\n os.path.expanduser('~/.%s.log' % logger))\n- handler.setLevel(logging.WARNING)\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s\"))\n", "issue": "using Qtile.cmd_{info, debug} doesn't switch logging level\nIt seems that critical, warning, and error do all work, though.\n\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom logging import getLogger, StreamHandler\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Logging formatter adding console colors to the output.\n \"\"\"\n black, red, green, yellow, blue, magenta, cyan, white = range(8)\n colors = {\n 'WARNING': yellow,\n 'INFO': green,\n 'DEBUG': blue,\n 'CRITICAL': yellow,\n 'ERROR': red,\n 'RED': red,\n 'GREEN': green,\n 'YELLOW': yellow,\n 'BLUE': blue,\n 'MAGENTA': magenta,\n 'CYAN': cyan,\n 'WHITE': white}\n reset_seq = '\\033[0m'\n color_seq = '\\033[%dm'\n bold_seq = '\\033[1m'\n\n def format(self, record):\n \"\"\"Format the record with colors.\"\"\"\n color = self.color_seq % (30 + self.colors[record.levelname])\n message = logging.Formatter.format(self, record)\n message = message.replace('$RESET', self.reset_seq)\\\n .replace('$BOLD', self.bold_seq)\\\n .replace('$COLOR', color)\n for color, value in self.colors.items():\n message = message.replace(\n '$' + color, self.color_seq % (value + 30))\\\n .replace('$BG' + color, self.color_seq % (value + 40))\\\n .replace('$BG-' + color, self.color_seq % (value + 40))\n return message + self.reset_seq\n\n\ndef init_log(log_level=logging.WARNING, logger='qtile'):\n handler = logging.FileHandler(\n os.path.expanduser('~/.%s.log' % logger))\n handler.setLevel(logging.WARNING)\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s\"))\n log = getLogger(logger)\n log.setLevel(log_level)\n log.addHandler(handler)\n log.warning('Starting %s' % logger.title())\n handler = StreamHandler(sys.stderr)\n handler.setFormatter(\n ColorFormatter(\n '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s'\n ' %(funcName)s:%(lineno)d $RESET %(message)s'))\n log.addHandler(handler)\n return log\n", "path": "libqtile/log_utils.py"}]}
num_tokens_prompt: 1,174
num_tokens_diff: 119
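The qtile fix above hinges on a detail of the standard `logging` module: a record must pass both the logger's level and each handler's level, so a `FileHandler` pinned at `WARNING` silently drops `INFO` and `DEBUG` records even after the logger's level is lowered. A self-contained demonstration of that behaviour:

```python
import io
import logging

logger = logging.getLogger("handler-level-demo")
logger.setLevel(logging.DEBUG)            # the logger itself allows DEBUG

stream = io.StringIO()
handler = logging.StreamHandler(stream)
handler.setLevel(logging.WARNING)         # ...but the handler still filters it out
logger.addHandler(handler)

logger.debug("never written")
logger.warning("written")
print(stream.getvalue())                  # only the warning appears
```

Removing the handler-level call, as the diff above does, leaves the logger's level as the single switch, which is what `cmd_info`/`cmd_debug` expect to control.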
problem_id: gh_patches_debug_20514
source: rasdani/github-patches
task_type: git_diff
in_source_id: liqd__a4-product-149
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tile images on partner page are not cut to same size ![screenshot from 2017-11-28 12-12-35](https://user-images.githubusercontent.com/8178179/33316792-7b8fc848-d435-11e7-8723-8849b5804bce.png) </issue> <code> [start of liqd_product/config/urls.py] 1 """Beteiligung.in URL Configuration.""" 2 3 from ckeditor_uploader import views as ck_views 4 from django.conf import settings 5 from django.conf.urls import include 6 from django.conf.urls import url 7 from django.contrib import admin 8 from django.views.decorators.cache import never_cache 9 from django.views.i18n import javascript_catalog 10 from rest_framework import routers 11 12 from adhocracy4.api import routers as a4routers 13 from adhocracy4.comments.api import CommentViewSet 14 from adhocracy4.follows.api import FollowViewSet 15 from adhocracy4.ratings.api import RatingViewSet 16 from adhocracy4.reports.api import ReportViewSet 17 from liqd_product.apps.partners.urlresolvers import partner_patterns 18 from liqd_product.apps.users.decorators import user_is_project_admin 19 from meinberlin.apps.documents.api import DocumentViewSet 20 from meinberlin.apps.polls.api import PollViewSet 21 from meinberlin.apps.polls.api import VoteViewSet 22 from meinberlin.apps.polls.routers import QuestionDefaultRouter 23 24 js_info_dict = { 25 'packages': ('adhocracy4.comments',), 26 } 27 28 router = routers.DefaultRouter() 29 router.register(r'follows', FollowViewSet, base_name='follows') 30 router.register(r'reports', ReportViewSet, base_name='reports') 31 router.register(r'polls', PollViewSet, base_name='polls') 32 33 module_router = a4routers.ModuleDefaultRouter() 34 # FIXME: rename to 'chapters' 35 module_router.register(r'documents', DocumentViewSet, base_name='chapters') 36 37 orga_router = a4routers.OrganisationDefaultRouter() 38 39 ct_router = a4routers.ContentTypeDefaultRouter() 40 ct_router.register(r'comments', CommentViewSet, base_name='comments') 41 ct_router.register(r'ratings', RatingViewSet, base_name='ratings') 42 43 question_router = QuestionDefaultRouter() 44 question_router.register(r'vote', VoteViewSet, base_name='vote') 45 46 47 urlpatterns = [ 48 # General platform urls 49 url(r'^django-admin/', include(admin.site.urls)), 50 url(r'^admin/', include('wagtail.wagtailadmin.urls')), 51 52 url(r'^accounts/', include('allauth.urls')), 53 url(r'^account/', include('liqd_product.apps.account.urls')), 54 url(r'^embed/', include('meinberlin.apps.embed.urls')), 55 url(r'^dashboard/', include('meinberlin.apps.dashboard2.urls')), 56 url(r'^profile/', include('liqd_product.apps.users.urls')), 57 58 # API urls 59 url(r'^api/', include(ct_router.urls)), 60 url(r'^api/', include(module_router.urls)), 61 url(r'^api/', include(orga_router.urls)), 62 url(r'^api/', include(question_router.urls)), 63 url(r'^api/', include(router.urls)), 64 65 url(r'^upload/', user_is_project_admin(ck_views.upload), 66 name='ckeditor_upload'), 67 url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)), 68 name='ckeditor_browse'), 69 70 url(r'^jsi18n/$', javascript_catalog, 71 js_info_dict, name='javascript-catalog'), 72 73 # Urls within the context of a partner 74 partner_patterns( 75 url(r'^modules/', include('adhocracy4.modules.urls')), 76 url(r'^projects/', include('adhocracy4.projects.urls')), 77 url(r'^offlineevents/', include('meinberlin.apps.offlineevents.urls', 78 namespace='meinberlin_offlineevents')), 79 url(r'^ideas/', 
include(r'meinberlin.apps.ideas.urls', 80 namespace='meinberlin_ideas')), 81 url(r'^mapideas/', include('meinberlin.apps.mapideas.urls', 82 namespace='meinberlin_mapideas')), 83 url(r'^text/', include('meinberlin.apps.documents.urls', 84 namespace='meinberlin_documents')), 85 ), 86 87 url(r'', include('liqd_product.apps.partners.urls')), 88 url(r'', include('wagtail.wagtailcore.urls')) 89 ] 90 91 92 if settings.DEBUG: 93 from django.conf.urls.static import static 94 from django.contrib.staticfiles.urls import staticfiles_urlpatterns 95 96 # Serve static and media locally 97 urlpatterns += staticfiles_urlpatterns() 98 urlpatterns += static(settings.MEDIA_URL, 99 document_root=settings.MEDIA_ROOT) 100 try: 101 import debug_toolbar 102 except ImportError: 103 pass 104 else: 105 urlpatterns = [ 106 url(r'^__debug__/', include(debug_toolbar.urls)), 107 ] + urlpatterns 108 [end of liqd_product/config/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liqd_product/config/urls.py b/liqd_product/config/urls.py --- a/liqd_product/config/urls.py +++ b/liqd_product/config/urls.py @@ -16,6 +16,7 @@ from adhocracy4.reports.api import ReportViewSet from liqd_product.apps.partners.urlresolvers import partner_patterns from liqd_product.apps.users.decorators import user_is_project_admin +from meinberlin.apps.contrib import views as contrib_views from meinberlin.apps.documents.api import DocumentViewSet from meinberlin.apps.polls.api import PollViewSet from meinberlin.apps.polls.api import VoteViewSet @@ -67,6 +68,7 @@ url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)), name='ckeditor_browse'), + url(r'^components/$', contrib_views.ComponentLibraryView.as_view()), url(r'^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog'),
{"golden_diff": "diff --git a/liqd_product/config/urls.py b/liqd_product/config/urls.py\n--- a/liqd_product/config/urls.py\n+++ b/liqd_product/config/urls.py\n@@ -16,6 +16,7 @@\n from adhocracy4.reports.api import ReportViewSet\n from liqd_product.apps.partners.urlresolvers import partner_patterns\n from liqd_product.apps.users.decorators import user_is_project_admin\n+from meinberlin.apps.contrib import views as contrib_views\n from meinberlin.apps.documents.api import DocumentViewSet\n from meinberlin.apps.polls.api import PollViewSet\n from meinberlin.apps.polls.api import VoteViewSet\n@@ -67,6 +68,7 @@\n url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)),\n name='ckeditor_browse'),\n \n+ url(r'^components/$', contrib_views.ComponentLibraryView.as_view()),\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n", "issue": "tile images on partner page are not cut to same size\n![screenshot from 2017-11-28 12-12-35](https://user-images.githubusercontent.com/8178179/33316792-7b8fc848-d435-11e7-8723-8849b5804bce.png)\r\n\n", "before_files": [{"content": "\"\"\"Beteiligung.in URL Configuration.\"\"\"\n\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf import settings\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\n\nfrom adhocracy4.api import routers as a4routers\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.follows.api import FollowViewSet\nfrom adhocracy4.ratings.api import RatingViewSet\nfrom adhocracy4.reports.api import ReportViewSet\nfrom liqd_product.apps.partners.urlresolvers import partner_patterns\nfrom liqd_product.apps.users.decorators import user_is_project_admin\nfrom meinberlin.apps.documents.api import DocumentViewSet\nfrom meinberlin.apps.polls.api import PollViewSet\nfrom meinberlin.apps.polls.api import VoteViewSet\nfrom meinberlin.apps.polls.routers import QuestionDefaultRouter\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'follows', FollowViewSet, base_name='follows')\nrouter.register(r'reports', ReportViewSet, base_name='reports')\nrouter.register(r'polls', PollViewSet, base_name='polls')\n\nmodule_router = a4routers.ModuleDefaultRouter()\n# FIXME: rename to 'chapters'\nmodule_router.register(r'documents', DocumentViewSet, base_name='chapters')\n\norga_router = a4routers.OrganisationDefaultRouter()\n\nct_router = a4routers.ContentTypeDefaultRouter()\nct_router.register(r'comments', CommentViewSet, base_name='comments')\nct_router.register(r'ratings', RatingViewSet, base_name='ratings')\n\nquestion_router = QuestionDefaultRouter()\nquestion_router.register(r'vote', VoteViewSet, base_name='vote')\n\n\nurlpatterns = [\n # General platform urls\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^admin/', include('wagtail.wagtailadmin.urls')),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^account/', include('liqd_product.apps.account.urls')),\n url(r'^embed/', include('meinberlin.apps.embed.urls')),\n url(r'^dashboard/', include('meinberlin.apps.dashboard2.urls')),\n url(r'^profile/', include('liqd_product.apps.users.urls')),\n\n # API urls\n url(r'^api/', include(ct_router.urls)),\n url(r'^api/', include(module_router.urls)),\n url(r'^api/', include(orga_router.urls)),\n url(r'^api/', 
include(question_router.urls)),\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/', user_is_project_admin(ck_views.upload),\n name='ckeditor_upload'),\n url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)),\n name='ckeditor_browse'),\n\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n\n # Urls within the context of a partner\n partner_patterns(\n url(r'^modules/', include('adhocracy4.modules.urls')),\n url(r'^projects/', include('adhocracy4.projects.urls')),\n url(r'^offlineevents/', include('meinberlin.apps.offlineevents.urls',\n namespace='meinberlin_offlineevents')),\n url(r'^ideas/', include(r'meinberlin.apps.ideas.urls',\n namespace='meinberlin_ideas')),\n url(r'^mapideas/', include('meinberlin.apps.mapideas.urls',\n namespace='meinberlin_mapideas')),\n url(r'^text/', include('meinberlin.apps.documents.urls',\n namespace='meinberlin_documents')),\n ),\n\n url(r'', include('liqd_product.apps.partners.urls')),\n url(r'', include('wagtail.wagtailcore.urls'))\n]\n\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n # Serve static and media locally\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n try:\n import debug_toolbar\n except ImportError:\n pass\n else:\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n", "path": "liqd_product/config/urls.py"}]}
num_tokens_prompt: 1,790
num_tokens_diff: 217
problem_id: gh_patches_debug_30361
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__ignite-1771
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Patch MNIST dataset downloading to fix CI Currently, there is an issue with downloading MNIST dataset using torchvision. Let's introduce the following patch to our CI to fix it: - https://github.com/pytorch/vision/issues/3500#issuecomment-790491487 Where to put that: - create new step with the patch before [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.github/workflows/unit-tests.yml#L106) - add patch [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.circleci/config.yml#L147). </issue> <code> [start of examples/mnist/mnist_patch.py] 1 """Patch to fix MNIST download issue as described here: 2 - https://github.com/pytorch/ignite/issues/1737 3 - https://github.com/pytorch/vision/issues/3500 4 """ 5 6 import os 7 import subprocess as sp 8 9 import torch 10 from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file 11 from torchvision.datasets.utils import extract_archive 12 13 14 def patched_download(self): 15 """wget patched download method. 16 """ 17 if self._check_exists(): 18 return 19 20 os.makedirs(self.raw_folder, exist_ok=True) 21 os.makedirs(self.processed_folder, exist_ok=True) 22 23 # download files 24 for url, md5 in self.resources: 25 filename = url.rpartition("/")[2] 26 download_root = os.path.expanduser(self.raw_folder) 27 extract_root = None 28 remove_finished = False 29 30 if extract_root is None: 31 extract_root = download_root 32 if not filename: 33 filename = os.path.basename(url) 34 35 # Use wget to download archives 36 sp.run(["wget", url, "-P", download_root]) 37 38 archive = os.path.join(download_root, filename) 39 print("Extracting {} to {}".format(archive, extract_root)) 40 extract_archive(archive, extract_root, remove_finished) 41 42 # process and save as torch files 43 print("Processing...") 44 45 training_set = ( 46 read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")), 47 read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")), 48 ) 49 test_set = ( 50 read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")), 51 read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")), 52 ) 53 with open(os.path.join(self.processed_folder, self.training_file), "wb") as f: 54 torch.save(training_set, f) 55 with open(os.path.join(self.processed_folder, self.test_file), "wb") as f: 56 torch.save(test_set, f) 57 58 print("Done!") 59 60 61 def main(): 62 # Patch download method 63 MNIST.download = patched_download 64 # Download MNIST 65 MNIST(".", download=True) 66 67 68 if __name__ == "__main__": 69 main() 70 [end of examples/mnist/mnist_patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/mnist/mnist_patch.py b/examples/mnist/mnist_patch.py deleted file mode 100644 --- a/examples/mnist/mnist_patch.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Patch to fix MNIST download issue as described here: -- https://github.com/pytorch/ignite/issues/1737 -- https://github.com/pytorch/vision/issues/3500 -""" - -import os -import subprocess as sp - -import torch -from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file -from torchvision.datasets.utils import extract_archive - - -def patched_download(self): - """wget patched download method. - """ - if self._check_exists(): - return - - os.makedirs(self.raw_folder, exist_ok=True) - os.makedirs(self.processed_folder, exist_ok=True) - - # download files - for url, md5 in self.resources: - filename = url.rpartition("/")[2] - download_root = os.path.expanduser(self.raw_folder) - extract_root = None - remove_finished = False - - if extract_root is None: - extract_root = download_root - if not filename: - filename = os.path.basename(url) - - # Use wget to download archives - sp.run(["wget", url, "-P", download_root]) - - archive = os.path.join(download_root, filename) - print("Extracting {} to {}".format(archive, extract_root)) - extract_archive(archive, extract_root, remove_finished) - - # process and save as torch files - print("Processing...") - - training_set = ( - read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")), - read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")), - ) - test_set = ( - read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")), - read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")), - ) - with open(os.path.join(self.processed_folder, self.training_file), "wb") as f: - torch.save(training_set, f) - with open(os.path.join(self.processed_folder, self.test_file), "wb") as f: - torch.save(test_set, f) - - print("Done!") - - -def main(): - # Patch download method - MNIST.download = patched_download - # Download MNIST - MNIST(".", download=True) - - -if __name__ == "__main__": - main()
{"golden_diff": "diff --git a/examples/mnist/mnist_patch.py b/examples/mnist/mnist_patch.py\ndeleted file mode 100644\n--- a/examples/mnist/mnist_patch.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"Patch to fix MNIST download issue as described here:\n-- https://github.com/pytorch/ignite/issues/1737\n-- https://github.com/pytorch/vision/issues/3500\n-\"\"\"\n-\n-import os\n-import subprocess as sp\n-\n-import torch\n-from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file\n-from torchvision.datasets.utils import extract_archive\n-\n-\n-def patched_download(self):\n- \"\"\"wget patched download method.\n- \"\"\"\n- if self._check_exists():\n- return\n-\n- os.makedirs(self.raw_folder, exist_ok=True)\n- os.makedirs(self.processed_folder, exist_ok=True)\n-\n- # download files\n- for url, md5 in self.resources:\n- filename = url.rpartition(\"/\")[2]\n- download_root = os.path.expanduser(self.raw_folder)\n- extract_root = None\n- remove_finished = False\n-\n- if extract_root is None:\n- extract_root = download_root\n- if not filename:\n- filename = os.path.basename(url)\n-\n- # Use wget to download archives\n- sp.run([\"wget\", url, \"-P\", download_root])\n-\n- archive = os.path.join(download_root, filename)\n- print(\"Extracting {} to {}\".format(archive, extract_root))\n- extract_archive(archive, extract_root, remove_finished)\n-\n- # process and save as torch files\n- print(\"Processing...\")\n-\n- training_set = (\n- read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n- read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n- )\n- test_set = (\n- read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n- read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n- )\n- with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n- torch.save(training_set, f)\n- with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n- torch.save(test_set, f)\n-\n- print(\"Done!\")\n-\n-\n-def main():\n- # Patch download method\n- MNIST.download = patched_download\n- # Download MNIST\n- MNIST(\".\", download=True)\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Patch MNIST dataset downloading to fix CI\nCurrently, there is an issue with downloading MNIST dataset using torchvision. 
Let's introduce the following patch to our CI to fix it:\r\n- https://github.com/pytorch/vision/issues/3500#issuecomment-790491487\r\n\r\nWhere to put that:\r\n- create new step with the patch before [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.github/workflows/unit-tests.yml#L106)\r\n- add patch [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.circleci/config.yml#L147).\n", "before_files": [{"content": "\"\"\"Patch to fix MNIST download issue as described here:\n- https://github.com/pytorch/ignite/issues/1737\n- https://github.com/pytorch/vision/issues/3500\n\"\"\"\n\nimport os\nimport subprocess as sp\n\nimport torch\nfrom torchvision.datasets.mnist import MNIST, read_image_file, read_label_file\nfrom torchvision.datasets.utils import extract_archive\n\n\ndef patched_download(self):\n \"\"\"wget patched download method.\n \"\"\"\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url, md5 in self.resources:\n filename = url.rpartition(\"/\")[2]\n download_root = os.path.expanduser(self.raw_folder)\n extract_root = None\n remove_finished = False\n\n if extract_root is None:\n extract_root = download_root\n if not filename:\n filename = os.path.basename(url)\n\n # Use wget to download archives\n sp.run([\"wget\", url, \"-P\", download_root])\n\n archive = os.path.join(download_root, filename)\n print(\"Extracting {} to {}\".format(archive, extract_root))\n extract_archive(archive, extract_root, remove_finished)\n\n # process and save as torch files\n print(\"Processing...\")\n\n training_set = (\n read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n )\n test_set = (\n read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n )\n with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n torch.save(test_set, f)\n\n print(\"Done!\")\n\n\ndef main():\n # Patch download method\n MNIST.download = patched_download\n # Download MNIST\n MNIST(\".\", download=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/mnist/mnist_patch.py"}]}
num_tokens_prompt: 1,374
num_tokens_diff: 601
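The ignite record above deletes a wget-based workaround in favour of a patch referenced only by URL, and that linked patch is not reproduced in the record. The snippet below shows one widely circulated workaround from the same period, installing a urllib opener with a browser-like User-Agent before torchvision downloads MNIST; whether it matches the exact patch in the linked comment is an assumption.

```python
# Assumed workaround (may differ from the patch behind the linked comment):
# the MNIST mirrors began rejecting urllib's default User-Agent, so CI jobs
# installed a global opener with a browser-like header before downloading.
import urllib.request

opener = urllib.request.build_opener()
opener.addheaders = [("User-agent", "Mozilla/5.0")]
urllib.request.install_opener(opener)

# torchvision.datasets.MNIST(".", download=True) would then use this opener,
# making the deleted wget-based module unnecessary.
```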
problem_id: gh_patches_debug_15872
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-427
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fill out model-defaults for tensorflow-od Currently we only have 1 model in model_defaults.json: https://github.com/azavea/raster-vision/blob/feature/api-refactor/src/rastervision/backend/model_defaults.json#L2 We need to fill it out to include each of these configurations, matched up with each of the pretrained weights from the model zoo: #### Configs https://github.com/azavea/models/tree/master/research/object_detection/samples/configs #### Weights https://github.com/azavea/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md </issue> <code> [start of src/rastervision/backend/api.py] 1 # flake8: noqa 2 3 # Registry keys 4 5 BACKEND = 'BACKEND' 6 7 ## Backend Keys 8 9 TF_OBJECT_DETECTION = 'TF_OBJECT_DETECTION' 10 KERAS_CLASSIFICATION = 'KERAS_CLASSIFICATION' 11 12 ## Model keys 13 14 ### TF Object Detection 15 SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO' 16 17 ## Keras Classificaiton 18 RESNET50_IMAGENET = 'RESNET50_IMAGENET' 19 20 from .backend_config import BackendConfig 21 [end of src/rastervision/backend/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/rastervision/backend/api.py b/src/rastervision/backend/api.py --- a/src/rastervision/backend/api.py +++ b/src/rastervision/backend/api.py @@ -13,6 +13,20 @@ ### TF Object Detection SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO' +SSD_MOBILENET_V2_COCO = 'SSD_MOBILENET_V2_COCO' +SSDLITE_MOBILENET_V2_COCO = 'SSDLITE_MOBILENET_V2_COCO' +SSD_INCEPTION_V2_COCO = 'SSD_INCEPTION_V2_COCO' +FASTER_RCNN_INCEPTION_V2_COCO = 'FASTER_RCNN_INCEPTION_V2_COCO' +FASTER_RCNN_RESNET50_COCO = 'FASTER_RCNN_RESNET50_COCO' +RFCN_RESNET101_COCO = 'RFCN_RESNET101_COCO' +FASTER_RCNN_RESNET101_COCO = 'FASTER_RCNN_RESNET101_COCO' +FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \ +'FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO' +MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \ +'MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO' +MASK_RCNN_INCEPTION_V2_COCO = 'MASK_RCNN_INCEPTION_V2_COCO' +MASK_RCNN_RESNET101_ATROUS_COCO = 'MASK_RCNN_RESNET101_ATROUS_COCO' +MASK_RCNN_RESNET50_ATROUS_COCO = 'MASK_RCNN_RESNET50_ATROUS_COCO' ## Keras Classificaiton RESNET50_IMAGENET = 'RESNET50_IMAGENET'
{"golden_diff": "diff --git a/src/rastervision/backend/api.py b/src/rastervision/backend/api.py\n--- a/src/rastervision/backend/api.py\n+++ b/src/rastervision/backend/api.py\n@@ -13,6 +13,20 @@\n \n ### TF Object Detection\n SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO'\n+SSD_MOBILENET_V2_COCO = 'SSD_MOBILENET_V2_COCO'\n+SSDLITE_MOBILENET_V2_COCO = 'SSDLITE_MOBILENET_V2_COCO'\n+SSD_INCEPTION_V2_COCO = 'SSD_INCEPTION_V2_COCO'\n+FASTER_RCNN_INCEPTION_V2_COCO = 'FASTER_RCNN_INCEPTION_V2_COCO'\n+FASTER_RCNN_RESNET50_COCO = 'FASTER_RCNN_RESNET50_COCO'\n+RFCN_RESNET101_COCO = 'RFCN_RESNET101_COCO'\n+FASTER_RCNN_RESNET101_COCO = 'FASTER_RCNN_RESNET101_COCO'\n+FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n+'FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\n+MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n+'MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\n+MASK_RCNN_INCEPTION_V2_COCO = 'MASK_RCNN_INCEPTION_V2_COCO'\n+MASK_RCNN_RESNET101_ATROUS_COCO = 'MASK_RCNN_RESNET101_ATROUS_COCO'\n+MASK_RCNN_RESNET50_ATROUS_COCO = 'MASK_RCNN_RESNET50_ATROUS_COCO'\n \n ## Keras Classificaiton\n RESNET50_IMAGENET = 'RESNET50_IMAGENET'\n", "issue": "Fill out model-defaults for tensorflow-od\nCurrently we only have 1 model in model_defaults.json: \r\n\r\nhttps://github.com/azavea/raster-vision/blob/feature/api-refactor/src/rastervision/backend/model_defaults.json#L2\r\n\r\nWe need to fill it out to include each of these configurations, matched up with each of the pretrained weights from the model zoo:\r\n\r\n#### Configs\r\nhttps://github.com/azavea/models/tree/master/research/object_detection/samples/configs\r\n\r\n#### Weights\r\nhttps://github.com/azavea/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md\n", "before_files": [{"content": "# flake8: noqa\n\n# Registry keys\n\nBACKEND = 'BACKEND'\n\n## Backend Keys\n\nTF_OBJECT_DETECTION = 'TF_OBJECT_DETECTION'\nKERAS_CLASSIFICATION = 'KERAS_CLASSIFICATION'\n\n## Model keys\n\n### TF Object Detection\nSSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO'\n\n## Keras Classificaiton\nRESNET50_IMAGENET = 'RESNET50_IMAGENET'\n\nfrom .backend_config import BackendConfig\n", "path": "src/rastervision/backend/api.py"}]}
num_tokens_prompt: 825
num_tokens_diff: 442
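The raster-vision diff above only adds the key constants, while the issue itself asks for matching entries in `model_defaults.json` pairing each key with a pipeline config and pretrained weights. A purely hypothetical sketch of what one such entry could look like, expressed as a Python dict (the field names and URIs are invented placeholders, not the project's real schema):

```python
# Hypothetical shape only; field names and URIs are invented placeholders.
model_defaults = {
    "TF_OBJECT_DETECTION": {
        "SSD_MOBILENET_V2_COCO": {
            "pipeline_config_uri": "<uri to samples/configs/ssd_mobilenet_v2_coco.config>",
            "pretrained_model_uri": "<uri to the matching model-zoo .tar.gz>",
        },
    },
}
print(model_defaults["TF_OBJECT_DETECTION"]["SSD_MOBILENET_V2_COCO"])
```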
problem_id: gh_patches_debug_37445
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-4815
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of op_builder/gptq.py] 1 import os 2 import torch 3 import re 4 5 from .builder import Builder 6 from .utils import append_nvcc_threads, get_cuda_cc_flag 7 8 class GPTQBuilder(Builder): 9 10 NAME = "cu_gptq" 11 PREBUILT_IMPORT_PATH = "colossalai._C.cu_gptq" 12 13 def __init__(self): 14 super().__init__(name=GPTQBuilder.NAME, 15 prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) 16 17 18 def include_dirs(self): 19 ret = [self.csrc_abs_path("gptq"), self.get_cuda_home_include()] 20 return ret 21 22 def sources_files(self): 23 ret = [ 24 self.csrc_abs_path(fname) for fname in [ 25 'gptq/linear_gptq.cpp', 26 'gptq/column_remap.cu', 27 'gptq/cuda_buffers.cu', 28 'gptq/q4_matmul.cu', 29 'gptq/q4_matrix.cu' 30 ] 31 ] 32 return ret 33 34 def cxx_flags(self): 35 return ['-O3'] + self.version_dependent_macros 36 37 def nvcc_flags(self): 38 extra_cuda_flags = ['-v', 39 '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', 40 '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', "-lcublas", "-std=c++17" 41 ] 42 43 44 for arch in torch.cuda.get_arch_list(): 45 res = re.search(r'sm_(\d+)', arch) 46 if res: 47 arch_cap = res[1] 48 if int(arch_cap) >= 80: 49 extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}']) 50 51 ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags 52 return append_nvcc_threads(ret) [end of op_builder/gptq.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/op_builder/gptq.py b/op_builder/gptq.py --- a/op_builder/gptq.py +++ b/op_builder/gptq.py @@ -1,19 +1,17 @@ -import os -import torch import re +import torch + from .builder import Builder -from .utils import append_nvcc_threads, get_cuda_cc_flag +from .utils import append_nvcc_threads -class GPTQBuilder(Builder): +class GPTQBuilder(Builder): NAME = "cu_gptq" PREBUILT_IMPORT_PATH = "colossalai._C.cu_gptq" def __init__(self): - super().__init__(name=GPTQBuilder.NAME, - prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) - + super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) def include_dirs(self): ret = [self.csrc_abs_path("gptq"), self.get_cuda_home_include()] @@ -21,32 +19,38 @@ def sources_files(self): ret = [ - self.csrc_abs_path(fname) for fname in [ - 'gptq/linear_gptq.cpp', - 'gptq/column_remap.cu', - 'gptq/cuda_buffers.cu', - 'gptq/q4_matmul.cu', - 'gptq/q4_matrix.cu' + self.csrc_abs_path(fname) + for fname in [ + "gptq/linear_gptq.cpp", + "gptq/column_remap.cu", + "gptq/cuda_buffers.cu", + "gptq/q4_matmul.cu", + "gptq/q4_matrix.cu", ] ] return ret def cxx_flags(self): - return ['-O3'] + self.version_dependent_macros + return ["-O3"] + self.version_dependent_macros def nvcc_flags(self): - extra_cuda_flags = ['-v', - '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', - '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', "-lcublas", "-std=c++17" + extra_cuda_flags = [ + "-v", + "-std=c++14", + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "-U__CUDA_NO_HALF2_OPERATORS__", + "-DTHRUST_IGNORE_CUB_VERSION_CHECK", + "-lcublas", + "-std=c++17", ] - for arch in torch.cuda.get_arch_list(): - res = re.search(r'sm_(\d+)', arch) + res = re.search(r"sm_(\d+)", arch) if res: arch_cap = res[1] if int(arch_cap) >= 80: - extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}']) + extra_cuda_flags.extend(["-gencode", f"arch=compute_{arch_cap},code={arch}"]) - ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags - return append_nvcc_threads(ret) \ No newline at end of file + ret = ["-O3", "--use_fast_math"] + self.version_dependent_macros + extra_cuda_flags + return append_nvcc_threads(ret)
{"golden_diff": "diff --git a/op_builder/gptq.py b/op_builder/gptq.py\n--- a/op_builder/gptq.py\n+++ b/op_builder/gptq.py\n@@ -1,19 +1,17 @@\n-import os\n-import torch\n import re\n \n+import torch\n+\n from .builder import Builder\n-from .utils import append_nvcc_threads, get_cuda_cc_flag\n+from .utils import append_nvcc_threads\n \n-class GPTQBuilder(Builder):\n \n+class GPTQBuilder(Builder):\n NAME = \"cu_gptq\"\n PREBUILT_IMPORT_PATH = \"colossalai._C.cu_gptq\"\n \n def __init__(self):\n- super().__init__(name=GPTQBuilder.NAME,\n- prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n-\n+ super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n \n def include_dirs(self):\n ret = [self.csrc_abs_path(\"gptq\"), self.get_cuda_home_include()]\n@@ -21,32 +19,38 @@\n \n def sources_files(self):\n ret = [\n- self.csrc_abs_path(fname) for fname in [\n- 'gptq/linear_gptq.cpp',\n- 'gptq/column_remap.cu',\n- 'gptq/cuda_buffers.cu',\n- 'gptq/q4_matmul.cu',\n- 'gptq/q4_matrix.cu'\n+ self.csrc_abs_path(fname)\n+ for fname in [\n+ \"gptq/linear_gptq.cpp\",\n+ \"gptq/column_remap.cu\",\n+ \"gptq/cuda_buffers.cu\",\n+ \"gptq/q4_matmul.cu\",\n+ \"gptq/q4_matrix.cu\",\n ]\n ]\n return ret\n \n def cxx_flags(self):\n- return ['-O3'] + self.version_dependent_macros\n+ return [\"-O3\"] + self.version_dependent_macros\n \n def nvcc_flags(self):\n- extra_cuda_flags = ['-v',\n- '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',\n- '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', \"-lcublas\", \"-std=c++17\"\n+ extra_cuda_flags = [\n+ \"-v\",\n+ \"-std=c++14\",\n+ \"-U__CUDA_NO_HALF_OPERATORS__\",\n+ \"-U__CUDA_NO_HALF_CONVERSIONS__\",\n+ \"-U__CUDA_NO_HALF2_OPERATORS__\",\n+ \"-DTHRUST_IGNORE_CUB_VERSION_CHECK\",\n+ \"-lcublas\",\n+ \"-std=c++17\",\n ]\n \n-\n for arch in torch.cuda.get_arch_list():\n- res = re.search(r'sm_(\\d+)', arch)\n+ res = re.search(r\"sm_(\\d+)\", arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 80:\n- extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])\n+ extra_cuda_flags.extend([\"-gencode\", f\"arch=compute_{arch_cap},code={arch}\"])\n \n- ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags\n- return append_nvcc_threads(ret)\n\\ No newline at end of file\n+ ret = [\"-O3\", \"--use_fast_math\"] + self.version_dependent_macros + extra_cuda_flags\n+ return append_nvcc_threads(ret)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import os\nimport torch\nimport re\n\nfrom .builder import Builder\nfrom .utils import append_nvcc_threads, get_cuda_cc_flag\n\nclass GPTQBuilder(Builder):\n\n NAME = \"cu_gptq\"\n PREBUILT_IMPORT_PATH = \"colossalai._C.cu_gptq\"\n\n def __init__(self):\n super().__init__(name=GPTQBuilder.NAME,\n prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n\n\n def include_dirs(self):\n ret = [self.csrc_abs_path(\"gptq\"), self.get_cuda_home_include()]\n return ret\n\n def sources_files(self):\n ret = [\n self.csrc_abs_path(fname) for fname in [\n 'gptq/linear_gptq.cpp',\n 'gptq/column_remap.cu',\n 'gptq/cuda_buffers.cu',\n 'gptq/q4_matmul.cu',\n 'gptq/q4_matrix.cu'\n ]\n ]\n return ret\n\n def cxx_flags(self):\n return ['-O3'] + self.version_dependent_macros\n\n def nvcc_flags(self):\n extra_cuda_flags = ['-v',\n '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',\n 
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', \"-lcublas\", \"-std=c++17\"\n ]\n\n\n for arch in torch.cuda.get_arch_list():\n res = re.search(r'sm_(\\d+)', arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 80:\n extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])\n\n ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags\n return append_nvcc_threads(ret)", "path": "op_builder/gptq.py"}]}
num_tokens_prompt: 1,087
num_tokens_diff: 816
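Aside on the record above: past the cosmetic reflow, the substantive logic in that builder is how it picks `-gencode` flags, by scanning the CUDA architectures the installed PyTorch was compiled for and keeping those with compute capability 8.0 or higher. A minimal standalone sketch of that selection step; `gencode_flags` is a hypothetical helper name, and it assumes a CUDA-enabled PyTorch build:

```python
import re

import torch


def gencode_flags(min_cc: int = 80) -> list:
    """Collect nvcc -gencode flags for every compiled arch at or above min_cc."""
    flags = []
    for arch in torch.cuda.get_arch_list():  # e.g. ['sm_70', 'sm_80', 'sm_90']
        match = re.search(r"sm_(\d+)", arch)
        if match and int(match[1]) >= min_cc:
            flags.extend(["-gencode", f"arch=compute_{match[1]},code={arch}"])
    return flags


print(gencode_flags())  # e.g. ['-gencode', 'arch=compute_80,code=sm_80', ...]
```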
problem_id: gh_patches_debug_35667
source: rasdani/github-patches
task_type: git_diff
in_source_id: scrapy__scrapy-4799
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Usage of HttpCompressionMiddleware needs to be reflected in Scrapy stats ## Summary Usage of `HttpCompressionMiddleware` needs to be relfected in Scrapy stats. ## Motivation In order to estimate scrapy memory usage efficiency and prevent.. memory leaks like [this](https://stackoverflow.com/q/63936759/10884791). I will need to know: 1. number of request/response objects that can be active (can be achieved by using [`trackref`](https://docs.scrapy.org/en/latest/topics/leaks.html#debugging-memory-leaks-with-trackref) ) 2. size of memory required to store that number of request/response objects. A lot of websites use compression to reduce traffic. In this case I would like to calculate average size of **decompressed** responses to estimate p.2. Decompression process means that at some point application will require to allocate memory to store both compressed and decompressed response body and I will need to know this sizes to have more complete vision of scrapy memory usage. Also size of decompressed body will be several times more than size of compressed response and it will affect scrapy memory usage. ## Describe alternatives you've considered The easiest one - is to change priority of `DownloaderStats` middleware and check difference in `downloader/response_bytes` stats parameter. ``` custom_settings = {"DOWNLOAD_DELAY":1, "DOWNLOADER_MIDDLEWARES":{ 'scrapy.downloadermiddlewares.stats.DownloaderStats':50 } ``` Stats from quotes.toscrape.com spider (it uses `gzip` compression) with default settings: ``` {'downloader/request_bytes': 2642, 'downloader/request_count': 10, 'downloader/request_method_count/GET': 10, 'downloader/response_bytes': 24534, ``` And with changed priority of `DownloaderStats` middleware: ``` {'downloader/request_bytes': 912, # size reduced as it didn't count size of request headers populated by downloader middlewares 'downloader/request_count': 10, 'downloader/request_method_count/GET': 10, 'downloader/response_bytes': 110191, # it counted size of decompressed data ``` Average size of compressed response (by default) - 2453 bytes. Average size of decompressed response - 11019 bytes (~4.5 times more). 
## Additional context Potential solution is to add something like this: ` self.stats.inc_value('decompressed_bytes', spider=spider)` into `process_response` method of `HttpCompressionMiddleware` </issue> <code> [start of scrapy/downloadermiddlewares/httpcompression.py] 1 import io 2 import zlib 3 4 from scrapy.utils.gz import gunzip 5 from scrapy.http import Response, TextResponse 6 from scrapy.responsetypes import responsetypes 7 from scrapy.exceptions import NotConfigured 8 9 10 ACCEPTED_ENCODINGS = [b'gzip', b'deflate'] 11 12 try: 13 import brotli 14 ACCEPTED_ENCODINGS.append(b'br') 15 except ImportError: 16 pass 17 18 try: 19 import zstandard 20 ACCEPTED_ENCODINGS.append(b'zstd') 21 except ImportError: 22 pass 23 24 25 class HttpCompressionMiddleware: 26 """This middleware allows compressed (gzip, deflate) traffic to be 27 sent/received from web sites""" 28 @classmethod 29 def from_crawler(cls, crawler): 30 if not crawler.settings.getbool('COMPRESSION_ENABLED'): 31 raise NotConfigured 32 return cls() 33 34 def process_request(self, request, spider): 35 request.headers.setdefault('Accept-Encoding', 36 b", ".join(ACCEPTED_ENCODINGS)) 37 38 def process_response(self, request, response, spider): 39 40 if request.method == 'HEAD': 41 return response 42 if isinstance(response, Response): 43 content_encoding = response.headers.getlist('Content-Encoding') 44 if content_encoding: 45 encoding = content_encoding.pop() 46 decoded_body = self._decode(response.body, encoding.lower()) 47 respcls = responsetypes.from_args( 48 headers=response.headers, url=response.url, body=decoded_body 49 ) 50 kwargs = dict(cls=respcls, body=decoded_body) 51 if issubclass(respcls, TextResponse): 52 # force recalculating the encoding until we make sure the 53 # responsetypes guessing is reliable 54 kwargs['encoding'] = None 55 response = response.replace(**kwargs) 56 if not content_encoding: 57 del response.headers['Content-Encoding'] 58 59 return response 60 61 def _decode(self, body, encoding): 62 if encoding == b'gzip' or encoding == b'x-gzip': 63 body = gunzip(body) 64 65 if encoding == b'deflate': 66 try: 67 body = zlib.decompress(body) 68 except zlib.error: 69 # ugly hack to work with raw deflate content that may 70 # be sent by microsoft servers. For more information, see: 71 # http://carsten.codimi.de/gzip.yaws/ 72 # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx 73 # http://www.gzip.org/zlib/zlib_faq.html#faq38 74 body = zlib.decompress(body, -15) 75 if encoding == b'br' and b'br' in ACCEPTED_ENCODINGS: 76 body = brotli.decompress(body) 77 if encoding == b'zstd' and b'zstd' in ACCEPTED_ENCODINGS: 78 # Using its streaming API since its simple API could handle only cases 79 # where there is content size data embedded in the frame 80 reader = zstandard.ZstdDecompressor().stream_reader(io.BytesIO(body)) 81 body = reader.read() 82 return body 83 [end of scrapy/downloadermiddlewares/httpcompression.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py --- a/scrapy/downloadermiddlewares/httpcompression.py +++ b/scrapy/downloadermiddlewares/httpcompression.py @@ -1,10 +1,12 @@ import io +import warnings import zlib -from scrapy.utils.gz import gunzip +from scrapy.exceptions import NotConfigured from scrapy.http import Response, TextResponse from scrapy.responsetypes import responsetypes -from scrapy.exceptions import NotConfigured +from scrapy.utils.deprecate import ScrapyDeprecationWarning +from scrapy.utils.gz import gunzip ACCEPTED_ENCODINGS = [b'gzip', b'deflate'] @@ -25,11 +27,25 @@ class HttpCompressionMiddleware: """This middleware allows compressed (gzip, deflate) traffic to be sent/received from web sites""" + def __init__(self, stats=None): + self.stats = stats + @classmethod def from_crawler(cls, crawler): if not crawler.settings.getbool('COMPRESSION_ENABLED'): raise NotConfigured - return cls() + try: + return cls(stats=crawler.stats) + except TypeError: + warnings.warn( + "HttpCompressionMiddleware subclasses must either modify " + "their '__init__' method to support a 'stats' parameter or " + "reimplement the 'from_crawler' method.", + ScrapyDeprecationWarning, + ) + result = cls() + result.stats = crawler.stats + return result def process_request(self, request, spider): request.headers.setdefault('Accept-Encoding', @@ -44,6 +60,9 @@ if content_encoding: encoding = content_encoding.pop() decoded_body = self._decode(response.body, encoding.lower()) + if self.stats: + self.stats.inc_value('httpcompression/response_bytes', len(decoded_body), spider=spider) + self.stats.inc_value('httpcompression/response_count', spider=spider) respcls = responsetypes.from_args( headers=response.headers, url=response.url, body=decoded_body )
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py\n--- a/scrapy/downloadermiddlewares/httpcompression.py\n+++ b/scrapy/downloadermiddlewares/httpcompression.py\n@@ -1,10 +1,12 @@\n import io\n+import warnings\n import zlib\n \n-from scrapy.utils.gz import gunzip\n+from scrapy.exceptions import NotConfigured\n from scrapy.http import Response, TextResponse\n from scrapy.responsetypes import responsetypes\n-from scrapy.exceptions import NotConfigured\n+from scrapy.utils.deprecate import ScrapyDeprecationWarning\n+from scrapy.utils.gz import gunzip\n \n \n ACCEPTED_ENCODINGS = [b'gzip', b'deflate']\n@@ -25,11 +27,25 @@\n class HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n+ def __init__(self, stats=None):\n+ self.stats = stats\n+\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COMPRESSION_ENABLED'):\n raise NotConfigured\n- return cls()\n+ try:\n+ return cls(stats=crawler.stats)\n+ except TypeError:\n+ warnings.warn(\n+ \"HttpCompressionMiddleware subclasses must either modify \"\n+ \"their '__init__' method to support a 'stats' parameter or \"\n+ \"reimplement the 'from_crawler' method.\",\n+ ScrapyDeprecationWarning,\n+ )\n+ result = cls()\n+ result.stats = crawler.stats\n+ return result\n \n def process_request(self, request, spider):\n request.headers.setdefault('Accept-Encoding',\n@@ -44,6 +60,9 @@\n if content_encoding:\n encoding = content_encoding.pop()\n decoded_body = self._decode(response.body, encoding.lower())\n+ if self.stats:\n+ self.stats.inc_value('httpcompression/response_bytes', len(decoded_body), spider=spider)\n+ self.stats.inc_value('httpcompression/response_count', spider=spider)\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n", "issue": "Usage of HttpCompressionMiddleware needs to be reflected in Scrapy stats\n## Summary\r\nUsage of `HttpCompressionMiddleware` needs to be relfected in Scrapy stats.\r\n## Motivation\r\nIn order to estimate scrapy memory usage efficiency and prevent.. memory leaks like [this](https://stackoverflow.com/q/63936759/10884791).\r\nI will need to know:\r\n1. number of request/response objects that can be active (can be achieved by using [`trackref`](https://docs.scrapy.org/en/latest/topics/leaks.html#debugging-memory-leaks-with-trackref) )\r\n2. size of memory required to store that number of request/response objects. \r\n\r\nA lot of websites use compression to reduce traffic. 
In this case I would like to calculate average size of **decompressed** responses to estimate p.2.\r\n\r\nDecompression process means that at some point application will require to allocate memory to store both compressed and decompressed response body and I will need to know this sizes to have more complete vision of scrapy memory usage.\r\n\r\nAlso size of decompressed body will be several times more than size of compressed response and it will affect scrapy memory usage.\r\n\r\n## Describe alternatives you've considered\r\nThe easiest one - is to change priority of `DownloaderStats` middleware and check difference in `downloader/response_bytes` stats parameter.\r\n```\r\n custom_settings = {\"DOWNLOAD_DELAY\":1,\r\n \"DOWNLOADER_MIDDLEWARES\":{\r\n 'scrapy.downloadermiddlewares.stats.DownloaderStats':50\r\n }\r\n```\r\nStats from quotes.toscrape.com spider (it uses `gzip` compression) with default settings:\r\n\r\n```\r\n{'downloader/request_bytes': 2642,\r\n 'downloader/request_count': 10,\r\n 'downloader/request_method_count/GET': 10,\r\n 'downloader/response_bytes': 24534,\r\n```\r\n \r\nAnd with changed priority of `DownloaderStats` middleware:\r\n\r\n```\r\n{'downloader/request_bytes': 912, # size reduced as it didn't count size of request headers populated by downloader middlewares\r\n 'downloader/request_count': 10,\r\n 'downloader/request_method_count/GET': 10,\r\n 'downloader/response_bytes': 110191, # it counted size of decompressed data \r\n```\r\n\r\nAverage size of compressed response (by default) - 2453 bytes.\r\nAverage size of decompressed response - 11019 bytes (~4.5 times more).\r\n\r\n## Additional context\r\nPotential solution is to add something like this:\r\n` self.stats.inc_value('decompressed_bytes', spider=spider)`\r\ninto `process_response` method of `HttpCompressionMiddleware`\n", "before_files": [{"content": "import io\nimport zlib\n\nfrom scrapy.utils.gz import gunzip\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.exceptions import NotConfigured\n\n\nACCEPTED_ENCODINGS = [b'gzip', b'deflate']\n\ntry:\n import brotli\n ACCEPTED_ENCODINGS.append(b'br')\nexcept ImportError:\n pass\n\ntry:\n import zstandard\n ACCEPTED_ENCODINGS.append(b'zstd')\nexcept ImportError:\n pass\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COMPRESSION_ENABLED'):\n raise NotConfigured\n return cls()\n\n def process_request(self, request, spider):\n request.headers.setdefault('Accept-Encoding',\n b\", \".join(ACCEPTED_ENCODINGS))\n\n def process_response(self, request, response, spider):\n\n if request.method == 'HEAD':\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist('Content-Encoding')\n if content_encoding:\n encoding = content_encoding.pop()\n decoded_body = self._decode(response.body, encoding.lower())\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs['encoding'] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers['Content-Encoding']\n\n return response\n\n def _decode(self, body, encoding):\n if 
encoding == b'gzip' or encoding == b'x-gzip':\n body = gunzip(body)\n\n if encoding == b'deflate':\n try:\n body = zlib.decompress(body)\n except zlib.error:\n # ugly hack to work with raw deflate content that may\n # be sent by microsoft servers. For more information, see:\n # http://carsten.codimi.de/gzip.yaws/\n # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx\n # http://www.gzip.org/zlib/zlib_faq.html#faq38\n body = zlib.decompress(body, -15)\n if encoding == b'br' and b'br' in ACCEPTED_ENCODINGS:\n body = brotli.decompress(body)\n if encoding == b'zstd' and b'zstd' in ACCEPTED_ENCODINGS:\n # Using its streaming API since its simple API could handle only cases\n # where there is content size data embedded in the frame\n reader = zstandard.ZstdDecompressor().stream_reader(io.BytesIO(body))\n body = reader.read()\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}]}
num_tokens_prompt: 1,922
num_tokens_diff: 472
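The patch in the record above follows the standard Scrapy recipe for stats-aware middlewares: take the crawler's stats collector in `from_crawler` and increment counters inside `process_response`. A minimal sketch of that recipe; the middleware name and the `bodysize/*` stat keys are made up rather than the real `httpcompression/*` ones:

```python
class BodySizeStatsMiddleware:
    """Count downloaded responses and their raw body sizes in Scrapy stats."""

    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        return cls(stats=crawler.stats)

    def process_response(self, request, response, spider):
        self.stats.inc_value("bodysize/response_count", spider=spider)
        self.stats.inc_value("bodysize/response_bytes", len(response.body), spider=spider)
        return response
```

Such a middleware would be enabled through the `DOWNLOADER_MIDDLEWARES` setting like any other, and its counters then appear in the end-of-crawl stats dump next to `downloader/response_bytes`.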
problem_id: gh_patches_debug_35340
source: rasdani/github-patches
task_type: git_diff
in_source_id: microsoft__playwright-python-86
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update versions in README.md on Playwright roll </issue> <code> [start of build_driver.py] 1 # Copyright (c) Microsoft Corporation. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import gzip 16 import os 17 import shutil 18 import subprocess 19 20 driver_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "driver") 21 package_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "playwright") 22 drivers_path = os.path.join(package_path, "drivers") 23 24 if os.path.exists(os.path.join(driver_path, "package-lock.json")): 25 os.remove(os.path.join(driver_path, "package-lock.json")) 26 if os.path.exists(os.path.join(driver_path, "node_modules")): 27 shutil.rmtree(os.path.join(driver_path, "node_modules")) 28 if os.path.exists(os.path.join(driver_path, "out")): 29 shutil.rmtree(os.path.join(driver_path, "out")) 30 31 subprocess.run("npm i", cwd=driver_path, shell=True) 32 subprocess.run("npm run bake", cwd=driver_path, shell=True) 33 34 for driver in ["driver-linux", "driver-macos", "driver-win.exe"]: 35 if os.path.exists(os.path.join(package_path, driver)): 36 os.remove(os.path.join(package_path, driver)) 37 38 in_path = os.path.join(driver_path, "out", driver) 39 out_path = os.path.join(drivers_path, driver + ".gz") 40 with open(in_path, "rb") as f_in, gzip.open(out_path, "wb") as f_out: 41 shutil.copyfileobj(f_in, f_out) 42 43 shutil.copyfile( 44 os.path.join(driver_path, "node_modules", "playwright", "browsers.json"), 45 os.path.join(drivers_path, "browsers.json"), 46 ) 47 [end of build_driver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/build_driver.py b/build_driver.py --- a/build_driver.py +++ b/build_driver.py @@ -14,33 +14,52 @@ import gzip import os +import re import shutil import subprocess +from pathlib import Path -driver_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "driver") -package_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "playwright") -drivers_path = os.path.join(package_path, "drivers") +_dirname = Path(os.path.dirname(os.path.abspath(__file__))) -if os.path.exists(os.path.join(driver_path, "package-lock.json")): - os.remove(os.path.join(driver_path, "package-lock.json")) -if os.path.exists(os.path.join(driver_path, "node_modules")): - shutil.rmtree(os.path.join(driver_path, "node_modules")) -if os.path.exists(os.path.join(driver_path, "out")): - shutil.rmtree(os.path.join(driver_path, "out")) +driver_path = _dirname / "driver" +package_path = _dirname / "playwright" +drivers_path = package_path / "drivers" + +if (driver_path / "package-lock.json").exists(): + os.remove(driver_path / "package-lock.json") +if (driver_path / "node_modules").exists(): + shutil.rmtree(driver_path / "node_modules") +if (driver_path / "out").exists(): + shutil.rmtree(driver_path / "out") subprocess.run("npm i", cwd=driver_path, shell=True) subprocess.run("npm run bake", cwd=driver_path, shell=True) for driver in ["driver-linux", "driver-macos", "driver-win.exe"]: - if os.path.exists(os.path.join(package_path, driver)): - os.remove(os.path.join(package_path, driver)) + if (package_path / driver).exists(): + os.remove((package_path / driver)) - in_path = os.path.join(driver_path, "out", driver) - out_path = os.path.join(drivers_path, driver + ".gz") + in_path = driver_path / "out" / driver + out_path = drivers_path / (driver + ".gz") with open(in_path, "rb") as f_in, gzip.open(out_path, "wb") as f_out: shutil.copyfileobj(f_in, f_out) +node_modules_playwright = driver_path / "node_modules" / "playwright" + shutil.copyfile( - os.path.join(driver_path, "node_modules", "playwright", "browsers.json"), - os.path.join(drivers_path, "browsers.json"), + node_modules_playwright / "browsers.json", drivers_path / "browsers.json", ) + +upstream_readme = (node_modules_playwright / "README.md").read_text() +pw_python_readme = (_dirname / "README.md").read_text() + +matches = re.findall(r"<!-- GEN:(.*?) -->(.*?)<!-- GEN:stop -->", upstream_readme) + +for key, value in matches: + pw_python_readme = re.sub( + rf"(<!-- GEN:{key} -->).*?(<!-- GEN:stop -->)", + f"<!-- GEN:{key} -->{value}<!-- GEN:stop -->", + pw_python_readme, + ) + +(_dirname / "README.md").write_text(pw_python_readme)
{"golden_diff": "diff --git a/build_driver.py b/build_driver.py\n--- a/build_driver.py\n+++ b/build_driver.py\n@@ -14,33 +14,52 @@\n \n import gzip\n import os\n+import re\n import shutil\n import subprocess\n+from pathlib import Path\n \n-driver_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"driver\")\n-package_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"playwright\")\n-drivers_path = os.path.join(package_path, \"drivers\")\n+_dirname = Path(os.path.dirname(os.path.abspath(__file__)))\n \n-if os.path.exists(os.path.join(driver_path, \"package-lock.json\")):\n- os.remove(os.path.join(driver_path, \"package-lock.json\"))\n-if os.path.exists(os.path.join(driver_path, \"node_modules\")):\n- shutil.rmtree(os.path.join(driver_path, \"node_modules\"))\n-if os.path.exists(os.path.join(driver_path, \"out\")):\n- shutil.rmtree(os.path.join(driver_path, \"out\"))\n+driver_path = _dirname / \"driver\"\n+package_path = _dirname / \"playwright\"\n+drivers_path = package_path / \"drivers\"\n+\n+if (driver_path / \"package-lock.json\").exists():\n+ os.remove(driver_path / \"package-lock.json\")\n+if (driver_path / \"node_modules\").exists():\n+ shutil.rmtree(driver_path / \"node_modules\")\n+if (driver_path / \"out\").exists():\n+ shutil.rmtree(driver_path / \"out\")\n \n subprocess.run(\"npm i\", cwd=driver_path, shell=True)\n subprocess.run(\"npm run bake\", cwd=driver_path, shell=True)\n \n for driver in [\"driver-linux\", \"driver-macos\", \"driver-win.exe\"]:\n- if os.path.exists(os.path.join(package_path, driver)):\n- os.remove(os.path.join(package_path, driver))\n+ if (package_path / driver).exists():\n+ os.remove((package_path / driver))\n \n- in_path = os.path.join(driver_path, \"out\", driver)\n- out_path = os.path.join(drivers_path, driver + \".gz\")\n+ in_path = driver_path / \"out\" / driver\n+ out_path = drivers_path / (driver + \".gz\")\n with open(in_path, \"rb\") as f_in, gzip.open(out_path, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n \n+node_modules_playwright = driver_path / \"node_modules\" / \"playwright\"\n+\n shutil.copyfile(\n- os.path.join(driver_path, \"node_modules\", \"playwright\", \"browsers.json\"),\n- os.path.join(drivers_path, \"browsers.json\"),\n+ node_modules_playwright / \"browsers.json\", drivers_path / \"browsers.json\",\n )\n+\n+upstream_readme = (node_modules_playwright / \"README.md\").read_text()\n+pw_python_readme = (_dirname / \"README.md\").read_text()\n+\n+matches = re.findall(r\"<!-- GEN:(.*?) 
-->(.*?)<!-- GEN:stop -->\", upstream_readme)\n+\n+for key, value in matches:\n+ pw_python_readme = re.sub(\n+ rf\"(<!-- GEN:{key} -->).*?(<!-- GEN:stop -->)\",\n+ f\"<!-- GEN:{key} -->{value}<!-- GEN:stop -->\",\n+ pw_python_readme,\n+ )\n+\n+(_dirname / \"README.md\").write_text(pw_python_readme)\n", "issue": "Update versions in README.md on Playwright roll\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport os\nimport shutil\nimport subprocess\n\ndriver_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"driver\")\npackage_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"playwright\")\ndrivers_path = os.path.join(package_path, \"drivers\")\n\nif os.path.exists(os.path.join(driver_path, \"package-lock.json\")):\n os.remove(os.path.join(driver_path, \"package-lock.json\"))\nif os.path.exists(os.path.join(driver_path, \"node_modules\")):\n shutil.rmtree(os.path.join(driver_path, \"node_modules\"))\nif os.path.exists(os.path.join(driver_path, \"out\")):\n shutil.rmtree(os.path.join(driver_path, \"out\"))\n\nsubprocess.run(\"npm i\", cwd=driver_path, shell=True)\nsubprocess.run(\"npm run bake\", cwd=driver_path, shell=True)\n\nfor driver in [\"driver-linux\", \"driver-macos\", \"driver-win.exe\"]:\n if os.path.exists(os.path.join(package_path, driver)):\n os.remove(os.path.join(package_path, driver))\n\n in_path = os.path.join(driver_path, \"out\", driver)\n out_path = os.path.join(drivers_path, driver + \".gz\")\n with open(in_path, \"rb\") as f_in, gzip.open(out_path, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n\nshutil.copyfile(\n os.path.join(driver_path, \"node_modules\", \"playwright\", \"browsers.json\"),\n os.path.join(drivers_path, \"browsers.json\"),\n)\n", "path": "build_driver.py"}]}
num_tokens_prompt: 1,083
num_tokens_diff: 734
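The README-syncing half of the diff above relies on paired HTML comments: values are harvested from the upstream Node.js package README with `re.findall` and spliced into the matching `<!-- GEN:key --> ... <!-- GEN:stop -->` blocks of the local README with `re.sub`. A self-contained sketch of the mechanism; the `chromium-version` key and the version string are invented, and keys are assumed to contain no regex metacharacters, as in the original:

```python
import re

upstream = "<!-- GEN:chromium-version -->121.0.6167.57<!-- GEN:stop -->"
local = "Chromium: <!-- GEN:chromium-version -->OLD<!-- GEN:stop -->"

for key, value in re.findall(r"<!-- GEN:(.*?) -->(.*?)<!-- GEN:stop -->", upstream):
    local = re.sub(
        rf"(<!-- GEN:{key} -->).*?(<!-- GEN:stop -->)",
        f"<!-- GEN:{key} -->{value}<!-- GEN:stop -->",
        local,
    )

print(local)  # Chromium: <!-- GEN:chromium-version -->121.0.6167.57<!-- GEN:stop -->
```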
problem_id: gh_patches_debug_1213
source: rasdani/github-patches
task_type: git_diff
in_source_id: scalableminds__webknossos-libs-312
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Convenience for wkcuber.api To open/create a dataset with the cool new high-level API the following code is required: ```python from wkcuber.api.Dataset import WKDataset from pathlib import Path ds1 = WKDataset.create(Path("path") / "to" / "dataset1", scale=(128,128,128)) ds2 = WKDataset.open(Path("path") / "to" / "dataset2") ``` For one-off scripts, I think that could be a bit more convenient, if we had an API like this ```python from wkcuber import WKDataset ds1 = WKDataset.create("path/to/dataset1", scale=(128, 128, 128)) ds2 = WKDataset.open("path/to/dataset2") ``` Any thoughts? @rschwanhold @jstriebel @philippotto </issue> <code> [start of wkcuber/__init__.py] 1 from .cubing import cubing 2 from .downsampling import downsample_mags 3 from .compress import compress_mag 4 from .metadata import write_webknossos_metadata 5 [end of wkcuber/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wkcuber/__init__.py b/wkcuber/__init__.py --- a/wkcuber/__init__.py +++ b/wkcuber/__init__.py @@ -1,4 +1,6 @@ +from .api.Dataset import WKDataset from .cubing import cubing from .downsampling import downsample_mags from .compress import compress_mag +from .mag import Mag from .metadata import write_webknossos_metadata
{"golden_diff": "diff --git a/wkcuber/__init__.py b/wkcuber/__init__.py\n--- a/wkcuber/__init__.py\n+++ b/wkcuber/__init__.py\n@@ -1,4 +1,6 @@\n+from .api.Dataset import WKDataset\n from .cubing import cubing\n from .downsampling import downsample_mags\n from .compress import compress_mag\n+from .mag import Mag\n from .metadata import write_webknossos_metadata\n", "issue": "Convenience for wkcuber.api\nTo open/create a dataset with the cool new high-level API the following code is required:\r\n\r\n```python\r\nfrom wkcuber.api.Dataset import WKDataset\r\nfrom pathlib import Path\r\n\r\nds1 = WKDataset.create(Path(\"path\") / \"to\" / \"dataset1\", scale=(128,128,128))\r\nds2 = WKDataset.open(Path(\"path\") / \"to\" / \"dataset2\")\r\n\r\n```\r\n\r\nFor one-off scripts, I think that could be a bit more convenient, if we had an API like this\r\n\r\n```python\r\nfrom wkcuber import WKDataset\r\n\r\nds1 = WKDataset.create(\"path/to/dataset1\", scale=(128, 128, 128))\r\nds2 = WKDataset.open(\"path/to/dataset2\")\r\n```\r\n\r\nAny thoughts? @rschwanhold @jstriebel @philippotto \r\n\n", "before_files": [{"content": "from .cubing import cubing\nfrom .downsampling import downsample_mags\nfrom .compress import compress_mag\nfrom .metadata import write_webknossos_metadata\n", "path": "wkcuber/__init__.py"}]}
num_tokens_prompt: 778
num_tokens_diff: 104
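The convenience requested in that issue, importing `WKDataset` from the package root and passing plain string paths, is usually the combination of a re-export in `__init__.py` (which the merged diff adds) and normalising path-like arguments at the API boundary. A hypothetical sketch of the latter; this is not the actual wkcuber implementation:

```python
from pathlib import Path
from typing import Tuple, Union


class Dataset:
    """Hypothetical dataset wrapper that accepts str or Path at every entry point."""

    def __init__(self, root: Union[str, Path]) -> None:
        self.root = Path(root)  # "data/ds1" and Path("data/ds1") both work

    @classmethod
    def create(cls, root: Union[str, Path], scale: Tuple[int, int, int]) -> "Dataset":
        root = Path(root)
        root.mkdir(parents=True, exist_ok=True)
        ds = cls(root)
        ds.scale = scale
        return ds

    @classmethod
    def open(cls, root: Union[str, Path]) -> "Dataset":
        if not Path(root).exists():
            raise FileNotFoundError(root)
        return cls(root)
```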
problem_id: gh_patches_debug_29909
source: rasdani/github-patches
task_type: git_diff
in_source_id: nf-core__tools-2031
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bump minimum required Nextflow version ### Description of feature Latest stable release brings lots of new features that we probably want to use at module level (eg. `bin` directories). </issue> <code> [start of nf_core/lint/readme.py] 1 import os 2 import re 3 4 5 def readme(self): 6 """Repository ``README.md`` tests 7 8 The ``README.md`` files for a project are very important and must meet some requirements: 9 10 * Nextflow badge 11 12 * If no Nextflow badge is found, a warning is given 13 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails 14 * Example badge code: 15 16 .. code-block:: md 17 18 [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.6-brightgreen.svg)](https://www.nextflow.io/) 19 20 * Bioconda badge 21 22 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required 23 * Required badge code: 24 25 .. code-block:: md 26 27 [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) 28 29 .. note:: These badges are a markdown image ``![alt-text](<image URL>)`` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write. 30 """ 31 passed = [] 32 warned = [] 33 failed = [] 34 35 # Remove field that should be ignored according to the linting config 36 ignore_configs = self.lint_config.get("readme", []) 37 38 with open(os.path.join(self.wf_path, "README.md"), "r") as fh: 39 content = fh.read() 40 41 if "nextflow_badge" not in ignore_configs: 42 # Check that there is a readme badge showing the minimum required version of Nextflow 43 # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/) 44 # and that it has the correct version 45 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-!?(?:%E2%89%A5|%3E%3D)([\d\.]+)-23aa62\.svg\)\]\(https://www\.nextflow\.io/\)" 46 match = re.search(nf_badge_re, content) 47 if match: 48 nf_badge_version = match.group(1).strip("'\"") 49 try: 50 if nf_badge_version != self.minNextflowVersion: 51 raise AssertionError() 52 except (AssertionError, KeyError): 53 failed.append( 54 f"README Nextflow minimum version badge does not match config. Badge: `{nf_badge_version}`, " 55 f"Config: `{self.minNextflowVersion}`" 56 ) 57 else: 58 passed.append( 59 f"README Nextflow minimum version badge matched config. Badge: `{nf_badge_version}`, " 60 f"Config: `{self.minNextflowVersion}`" 61 ) 62 else: 63 warned.append("README did not have a Nextflow minimum version badge.") 64 65 # Check that the minimum version mentioned in the quick start section is consistent 66 # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)" 67 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)" 68 match = re.search(nf_version_re, content) 69 if match: 70 nf_quickstart_version = match.group(1) 71 try: 72 if nf_quickstart_version != self.minNextflowVersion: 73 raise AssertionError() 74 except (AssertionError, KeyError): 75 failed.append( 76 f"README Nextflow minimium version in Quick Start section does not match config. 
README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`" 77 ) 78 else: 79 passed.append( 80 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`" 81 ) 82 else: 83 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.") 84 85 return {"passed": passed, "warned": warned, "failed": failed} 86 [end of nf_core/lint/readme.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py --- a/nf_core/lint/readme.py +++ b/nf_core/lint/readme.py @@ -40,7 +40,7 @@ if "nextflow_badge" not in ignore_configs: # Check that there is a readme badge showing the minimum required version of Nextflow - # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/) + # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/) # and that it has the correct version nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-!?(?:%E2%89%A5|%3E%3D)([\d\.]+)-23aa62\.svg\)\]\(https://www\.nextflow\.io/\)" match = re.search(nf_badge_re, content) @@ -63,7 +63,7 @@ warned.append("README did not have a Nextflow minimum version badge.") # Check that the minimum version mentioned in the quick start section is consistent - # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)" + # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)" nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)" match = re.search(nf_version_re, content) if match:
{"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -40,7 +40,7 @@\n \n if \"nextflow_badge\" not in ignore_configs:\n # Check that there is a readme badge showing the minimum required version of Nextflow\n- # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/)\n+ # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-!?(?:%E2%89%A5|%3E%3D)([\\d\\.]+)-23aa62\\.svg\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n@@ -63,7 +63,7 @@\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n \n # Check that the minimum version mentioned in the quick start section is consistent\n- # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)\"\n+ # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n", "issue": "Bump minimum required Nextflow version\n### Description of feature\n\nLatest stable release brings lots of new features that we probably want to use at module level (eg. `bin` directories).\n", "before_files": [{"content": "import os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.6-brightgreen.svg)](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\n\n .. 
note:: These badges are a markdown image ``![alt-text](<image URL>)`` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n # Remove field that should be ignored according to the linting config\n ignore_configs = self.lint_config.get(\"readme\", [])\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n if \"nextflow_badge\" not in ignore_configs:\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-!?(?:%E2%89%A5|%3E%3D)([\\d\\.]+)-23aa62\\.svg\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n if nf_badge_version != self.minNextflowVersion:\n raise AssertionError()\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimum version badge does not match config. Badge: `{nf_badge_version}`, \"\n f\"Config: `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version badge matched config. Badge: `{nf_badge_version}`, \"\n f\"Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n if nf_quickstart_version != self.minNextflowVersion:\n raise AssertionError()\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]}
num_tokens_prompt: 1,716
num_tokens_diff: 491
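The lint rule touched in the record above extracts the minimum Nextflow version from the README badge with a regular expression and compares it to the value configured for the pipeline. Stripped of the lint-report plumbing, the check reduces to roughly the following; the regex is a simplified form of the real one, and the badge text and `min_nf_version` value are illustrative:

```python
import re

min_nf_version = "22.10.1"  # normally read from the pipeline's nextflow.config
readme = (
    "[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-"
    "%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)"
)

# Simplified form of the badge regex used by the lint test
match = re.search(r"nextflow%20DSL2-(?:%E2%89%A5|%3E%3D)([\d.]+)-23aa62", readme)
if match and match.group(1) == min_nf_version:
    print("README badge matches the configured minimum Nextflow version")
else:
    print("README badge is missing or out of date")
```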
problem_id: gh_patches_debug_937
source: rasdani/github-patches
task_type: git_diff
in_source_id: boto__boto-2166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid path check in euca-bundle-image The -i option uses convert_file in boto/roboto/param.py to verify that the path passed is, indeed, a file. This fails unless the path specified is a boring old file which is not necessary. Indeed it not being necessary is sort of the whole point in unix having a /dev in the first place. Everything is a file. The code calls os.path.isfile(value) in convert_file(). It should call os.path.exists(value) and not os.path.isdir(value). Directories are the only types of files which need to be considered special in the normal course of events. </issue> <code> [start of boto/roboto/param.py] 1 # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ 2 # Copyright (c) 2010, Eucalyptus Systems, Inc. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a 5 # copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, dis- 8 # tribute, sublicense, and/or sell copies of the Software, and to permit 9 # persons to whom the Software is furnished to do so, subject to the fol- 10 # lowing conditions: 11 # 12 # The above copyright notice and this permission notice shall be included 13 # in all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 # IN THE SOFTWARE. 22 23 import os 24 25 class Converter(object): 26 27 @classmethod 28 def convert_string(cls, param, value): 29 # TODO: could do length validation, etc. 
here 30 if not isinstance(value, basestring): 31 raise ValueError 32 return value 33 34 @classmethod 35 def convert_integer(cls, param, value): 36 # TODO: could do range checking here 37 return int(value) 38 39 @classmethod 40 def convert_boolean(cls, param, value): 41 """ 42 For command line arguments, just the presence 43 of the option means True so just return True 44 """ 45 return True 46 47 @classmethod 48 def convert_file(cls, param, value): 49 if os.path.isfile(value): 50 return value 51 raise ValueError 52 53 @classmethod 54 def convert_dir(cls, param, value): 55 if os.path.isdir(value): 56 return value 57 raise ValueError 58 59 @classmethod 60 def convert(cls, param, value): 61 try: 62 if hasattr(cls, 'convert_'+param.ptype): 63 mthd = getattr(cls, 'convert_'+param.ptype) 64 else: 65 mthd = cls.convert_string 66 return mthd(param, value) 67 except: 68 raise ValidationException(param, '') 69 70 class Param(Converter): 71 72 def __init__(self, name=None, ptype='string', optional=True, 73 short_name=None, long_name=None, doc='', 74 metavar=None, cardinality=1, default=None, 75 choices=None, encoder=None, request_param=True): 76 self.name = name 77 self.ptype = ptype 78 self.optional = optional 79 self.short_name = short_name 80 self.long_name = long_name 81 self.doc = doc 82 self.metavar = metavar 83 self.cardinality = cardinality 84 self.default = default 85 self.choices = choices 86 self.encoder = encoder 87 self.request_param = request_param 88 89 @property 90 def optparse_long_name(self): 91 ln = None 92 if self.long_name: 93 ln = '--%s' % self.long_name 94 return ln 95 96 @property 97 def synopsis_long_name(self): 98 ln = None 99 if self.long_name: 100 ln = '--%s' % self.long_name 101 return ln 102 103 @property 104 def getopt_long_name(self): 105 ln = None 106 if self.long_name: 107 ln = '%s' % self.long_name 108 if self.ptype != 'boolean': 109 ln += '=' 110 return ln 111 112 @property 113 def optparse_short_name(self): 114 sn = None 115 if self.short_name: 116 sn = '-%s' % self.short_name 117 return sn 118 119 @property 120 def synopsis_short_name(self): 121 sn = None 122 if self.short_name: 123 sn = '-%s' % self.short_name 124 return sn 125 126 @property 127 def getopt_short_name(self): 128 sn = None 129 if self.short_name: 130 sn = '%s' % self.short_name 131 if self.ptype != 'boolean': 132 sn += ':' 133 return sn 134 135 def convert(self, value): 136 """ 137 Convert a string value as received in the command line 138 tools and convert to the appropriate type of value. 139 Raise a ValidationError if the value can't be converted. 140 141 :type value: str 142 :param value: The value to convert. This should always 143 be a string. 144 """ 145 return super(Param, self).convert(self,value) 146 147 148 [end of boto/roboto/param.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/boto/roboto/param.py b/boto/roboto/param.py --- a/boto/roboto/param.py +++ b/boto/roboto/param.py @@ -46,7 +46,7 @@ @classmethod def convert_file(cls, param, value): - if os.path.isfile(value): + if os.path.exists(value) and not os.path.isdir(value): return value raise ValueError
{"golden_diff": "diff --git a/boto/roboto/param.py b/boto/roboto/param.py\n--- a/boto/roboto/param.py\n+++ b/boto/roboto/param.py\n@@ -46,7 +46,7 @@\n \n @classmethod\n def convert_file(cls, param, value):\n- if os.path.isfile(value):\n+ if os.path.exists(value) and not os.path.isdir(value):\n return value\n raise ValueError\n", "issue": "Invalid path check in euca-bundle-image\nThe -i option uses convert_file in boto/roboto/param.py to verify that the path passed is, indeed, a file. This fails unless the path specified is a boring old file which is not necessary. Indeed it not being necessary is sort of the whole point in unix having a /dev in the first place. Everything is a file.\n\nThe code calls os.path.isfile(value) in convert_file(). It should call os.path.exists(value) and not os.path.isdir(value). Directories are the only types of files which need to be considered special in the normal course of events.\n\n", "before_files": [{"content": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nimport os\n\nclass Converter(object):\n\n @classmethod\n def convert_string(cls, param, value):\n # TODO: could do length validation, etc. 
here\n if not isinstance(value, basestring):\n raise ValueError\n return value\n\n @classmethod\n def convert_integer(cls, param, value):\n # TODO: could do range checking here\n return int(value)\n\n @classmethod\n def convert_boolean(cls, param, value):\n \"\"\"\n For command line arguments, just the presence\n of the option means True so just return True\n \"\"\"\n return True\n\n @classmethod\n def convert_file(cls, param, value):\n if os.path.isfile(value):\n return value\n raise ValueError\n\n @classmethod\n def convert_dir(cls, param, value):\n if os.path.isdir(value):\n return value\n raise ValueError\n\n @classmethod\n def convert(cls, param, value):\n try:\n if hasattr(cls, 'convert_'+param.ptype):\n mthd = getattr(cls, 'convert_'+param.ptype)\n else:\n mthd = cls.convert_string\n return mthd(param, value)\n except:\n raise ValidationException(param, '')\n\nclass Param(Converter):\n\n def __init__(self, name=None, ptype='string', optional=True,\n short_name=None, long_name=None, doc='',\n metavar=None, cardinality=1, default=None,\n choices=None, encoder=None, request_param=True):\n self.name = name\n self.ptype = ptype\n self.optional = optional\n self.short_name = short_name\n self.long_name = long_name\n self.doc = doc\n self.metavar = metavar\n self.cardinality = cardinality\n self.default = default\n self.choices = choices\n self.encoder = encoder\n self.request_param = request_param\n\n @property\n def optparse_long_name(self):\n ln = None\n if self.long_name:\n ln = '--%s' % self.long_name\n return ln\n\n @property\n def synopsis_long_name(self):\n ln = None\n if self.long_name:\n ln = '--%s' % self.long_name\n return ln\n\n @property\n def getopt_long_name(self):\n ln = None\n if self.long_name:\n ln = '%s' % self.long_name\n if self.ptype != 'boolean':\n ln += '='\n return ln\n\n @property\n def optparse_short_name(self):\n sn = None\n if self.short_name:\n sn = '-%s' % self.short_name\n return sn\n\n @property\n def synopsis_short_name(self):\n sn = None\n if self.short_name:\n sn = '-%s' % self.short_name\n return sn\n\n @property\n def getopt_short_name(self):\n sn = None\n if self.short_name:\n sn = '%s' % self.short_name\n if self.ptype != 'boolean':\n sn += ':'\n return sn\n\n def convert(self, value):\n \"\"\"\n Convert a string value as received in the command line\n tools and convert to the appropriate type of value.\n Raise a ValidationError if the value can't be converted.\n\n :type value: str\n :param value: The value to convert. This should always\n be a string.\n \"\"\"\n return super(Param, self).convert(self,value)\n\n\n", "path": "boto/roboto/param.py"}]}
num_tokens_prompt: 2,028
num_tokens_diff: 103
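The one-line fix in that record captures a classic Unix point: `os.path.isfile` is true only for regular files, so readable special files such as `/dev/null` or a FIFO fail the old check, while "exists and is not a directory" accepts them. A tiny illustration with a hypothetical helper; the noted output is for a typical Linux/macOS system:

```python
import os


def is_usable_path(value: str) -> bool:
    """Accept regular files and special files (devices, FIFOs); reject directories."""
    return os.path.exists(value) and not os.path.isdir(value)


print(os.path.isfile("/dev/null"))   # False: a character device is not a regular file
print(is_usable_path("/dev/null"))   # True: it exists and is not a directory
print(is_usable_path("/tmp"))        # False: directories are still rejected
```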
problem_id: gh_patches_debug_526
source: rasdani/github-patches
task_type: git_diff
in_source_id: Parsl__parsl-2302
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove parsl container bits This issue is to remind us to remove Parsl container support and update the docs as soon as the funcX executor is integrated-- we should switch to recommending container support through it. </issue> <code> [start of docker/app1/app1.py] 1 2 def predict(list_items): 3 """Returns the double of the items""" 4 return [i*2 for i in list_items] 5 [end of docker/app1/app1.py] [start of docker/app2/app2.py] 1 2 def predict(list_items): 3 """Returns items+10""" 4 return [i+10 for i in list_items] 5 [end of docker/app2/app2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/app1/app1.py b/docker/app1/app1.py deleted file mode 100644 --- a/docker/app1/app1.py +++ /dev/null @@ -1,4 +0,0 @@ - -def predict(list_items): - """Returns the double of the items""" - return [i*2 for i in list_items] diff --git a/docker/app2/app2.py b/docker/app2/app2.py deleted file mode 100644 --- a/docker/app2/app2.py +++ /dev/null @@ -1,4 +0,0 @@ - -def predict(list_items): - """Returns items+10""" - return [i+10 for i in list_items]
{"golden_diff": "diff --git a/docker/app1/app1.py b/docker/app1/app1.py\ndeleted file mode 100644\n--- a/docker/app1/app1.py\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-\n-def predict(list_items):\n- \"\"\"Returns the double of the items\"\"\"\n- return [i*2 for i in list_items]\ndiff --git a/docker/app2/app2.py b/docker/app2/app2.py\ndeleted file mode 100644\n--- a/docker/app2/app2.py\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-\n-def predict(list_items):\n- \"\"\"Returns items+10\"\"\"\n- return [i+10 for i in list_items]\n", "issue": "Remove parsl container bits\nThis issue is to remind us to remove Parsl container support and update the docs as soon as the funcX executor is integrated-- we should switch to recommending container support through it.\n", "before_files": [{"content": "\ndef predict(list_items):\n \"\"\"Returns the double of the items\"\"\"\n return [i*2 for i in list_items]\n", "path": "docker/app1/app1.py"}, {"content": "\ndef predict(list_items):\n \"\"\"Returns items+10\"\"\"\n return [i+10 for i in list_items]\n", "path": "docker/app2/app2.py"}]}
num_tokens_prompt: 663
num_tokens_diff: 165
problem_id: gh_patches_debug_28035
source: rasdani/github-patches
task_type: git_diff
in_source_id: scikit-image__scikit-image-6293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Build image pyramids not always working with other images ## Description Using the *[Build image pyramids](https://scikit-image.org/docs/dev/auto_examples/transform/plot_pyramid.html)* example with a random image is not always working. ## Way to reproduce ### hand.jpg ![hand](https://user-images.githubusercontent.com/28227183/69906161-5e4ce380-13bf-11ea-9d4a-b51c54f11581.jpg) ```python import numpy as np import matplotlib.pyplot as plt from skimage import data from skimage.transform import pyramid_gaussian import imageio as io image = io.imread('hand.jpg') # data.astronaut() rows, cols, dim = image.shape pyramid = tuple(pyramid_gaussian(image, downscale=2, multichannel=True)) composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double) composite_image[:rows, :cols, :] = pyramid[0] i_row = 0 for p in pyramid[1:]: n_rows, n_cols = p.shape[:2] composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p i_row += n_rows fig, ax = plt.subplots() ax.imshow(composite_image) plt.show() ``` ## Version information ```python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] Windows-10-10.0.18362-SP0 scikit-image version: 0.16.1 numpy version: 1.17.2 ``` ```python Traceback (most recent call last): File "D:\Vincent\Bureau\Patern recongnition and image analysis\Patern recognition and patern analysis\LAB_1\plot_pyramid.py", line 44, in <module> composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p ValueError: could not broadcast input array from shape (2,2,3) into shape (1,2,3) ``` ## Possible solution I was able to make it works for the same RGB image but this code is not adapted for BW and RGBA. ```python import numpy as np import matplotlib.pyplot as plt from skimage import data from skimage.transform import pyramid_gaussian import imageio as io image = io.imread('hand.jpg') # data.astronaut() rows, cols, dim = image.shape pyramid = tuple(pyramid_gaussian(image, downscale=2, multichannel=True)) composite_image = np.zeros((rows, cols + cols // 2, dim), dtype=np.double) composite_image[:rows, :cols, :] = pyramid[0] i_row = 0 for p in pyramid[1:]: n_rows, n_cols = p.shape[:2] # Check the dimension before assignement if(composite_image[i_row:i_row + n_rows, cols:cols + n_cols].shape==p.shape): composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p i_row += n_rows else: break fig, ax = plt.subplots() ax.imshow(composite_image) plt.show() ``` ### Result ![Figure_1](https://user-images.githubusercontent.com/28227183/69906188-d915fe80-13bf-11ea-8a07-7f9d1d354897.png) </issue> <code> [start of doc/examples/transform/plot_pyramid.py] 1 """ 2 ==================== 3 Build image pyramids 4 ==================== 5 6 The ``pyramid_gaussian`` function takes an image and yields successive images 7 shrunk by a constant scale factor. Image pyramids are often used, e.g., to 8 implement algorithms for denoising, texture discrimination, and scale-invariant 9 detection. 
10 11 """ 12 import numpy as np 13 import matplotlib.pyplot as plt 14 15 from skimage import data 16 from skimage.transform import pyramid_gaussian 17 18 19 image = data.astronaut() 20 rows, cols, dim = image.shape 21 pyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1)) 22 23 composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double) 24 25 composite_image[:rows, :cols, :] = pyramid[0] 26 27 i_row = 0 28 for p in pyramid[1:]: 29 n_rows, n_cols = p.shape[:2] 30 composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p 31 i_row += n_rows 32 33 fig, ax = plt.subplots() 34 ax.imshow(composite_image) 35 plt.show() 36 [end of doc/examples/transform/plot_pyramid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/examples/transform/plot_pyramid.py b/doc/examples/transform/plot_pyramid.py --- a/doc/examples/transform/plot_pyramid.py +++ b/doc/examples/transform/plot_pyramid.py @@ -9,6 +9,8 @@ detection. """ +import math + import numpy as np import matplotlib.pyplot as plt @@ -20,10 +22,31 @@ rows, cols, dim = image.shape pyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1)) -composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double) - +##################################################################### +# Generate a composite image for visualization +# ============================================ +# +# For visualization, we generate a composite image with the same number of rows +# as the source image but with ``cols + pyramid[1].shape[1]`` columns. We then +# have space to stack all of the dowsampled images to the right of the +# original. +# +# Note: The sum of the number of rows in all dowsampled images in the pyramid +# may sometimes exceed the original image size in cases when image.shape[0] is +# not a power of two. We expand the number of rows in the composite slightly as +# necessary to account for this. Expansion beyond the number of rows in the +# original will also be necessary to cover cases where downscale < 2. + +# determine the total number of rows and columns for the composite +composite_rows = max(rows, sum(p.shape[0] for p in pyramid[1:])) +composite_cols = cols + pyramid[1].shape[1] +composite_image = np.zeros((composite_rows, composite_cols, 3), + dtype=np.double) + +# store the original to the left composite_image[:rows, :cols, :] = pyramid[0] +# stack all downsampled images in a column to the right of the original i_row = 0 for p in pyramid[1:]: n_rows, n_cols = p.shape[:2]
{"golden_diff": "diff --git a/doc/examples/transform/plot_pyramid.py b/doc/examples/transform/plot_pyramid.py\n--- a/doc/examples/transform/plot_pyramid.py\n+++ b/doc/examples/transform/plot_pyramid.py\n@@ -9,6 +9,8 @@\n detection.\n \n \"\"\"\n+import math\n+\n import numpy as np\n import matplotlib.pyplot as plt\n \n@@ -20,10 +22,31 @@\n rows, cols, dim = image.shape\n pyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1))\n \n-composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)\n-\n+#####################################################################\n+# Generate a composite image for visualization\n+# ============================================\n+#\n+# For visualization, we generate a composite image with the same number of rows\n+# as the source image but with ``cols + pyramid[1].shape[1]`` columns. We then\n+# have space to stack all of the dowsampled images to the right of the\n+# original.\n+#\n+# Note: The sum of the number of rows in all dowsampled images in the pyramid\n+# may sometimes exceed the original image size in cases when image.shape[0] is\n+# not a power of two. We expand the number of rows in the composite slightly as\n+# necessary to account for this. Expansion beyond the number of rows in the\n+# original will also be necessary to cover cases where downscale < 2.\n+\n+# determine the total number of rows and columns for the composite\n+composite_rows = max(rows, sum(p.shape[0] for p in pyramid[1:]))\n+composite_cols = cols + pyramid[1].shape[1]\n+composite_image = np.zeros((composite_rows, composite_cols, 3),\n+ dtype=np.double)\n+\n+# store the original to the left\n composite_image[:rows, :cols, :] = pyramid[0]\n \n+# stack all downsampled images in a column to the right of the original\n i_row = 0\n for p in pyramid[1:]:\n n_rows, n_cols = p.shape[:2]\n", "issue": "Build image pyramids not always working with other images\n## Description\r\nUsing the *[Build image pyramids](https://scikit-image.org/docs/dev/auto_examples/transform/plot_pyramid.html)* example with a random image is not always working.\r\n\r\n## Way to reproduce\r\n### hand.jpg\r\n![hand](https://user-images.githubusercontent.com/28227183/69906161-5e4ce380-13bf-11ea-9d4a-b51c54f11581.jpg)\r\n```python\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage import data\r\nfrom skimage.transform import pyramid_gaussian\r\n\r\nimport imageio as io\r\n\r\nimage = io.imread('hand.jpg') # data.astronaut()\r\nrows, cols, dim = image.shape\r\npyramid = tuple(pyramid_gaussian(image, downscale=2, multichannel=True))\r\n\r\ncomposite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)\r\n\r\ncomposite_image[:rows, :cols, :] = pyramid[0]\r\n\r\ni_row = 0\r\nfor p in pyramid[1:]:\r\n n_rows, n_cols = p.shape[:2]\r\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\r\n i_row += n_rows\r\n\r\nfig, ax = plt.subplots()\r\nax.imshow(composite_image)\r\nplt.show()\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)]\r\nWindows-10-10.0.18362-SP0\r\nscikit-image version: 0.16.1\r\nnumpy version: 1.17.2\r\n```\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"D:\\Vincent\\Bureau\\Patern recongnition and image analysis\\Patern recognition and patern analysis\\LAB_1\\plot_pyramid.py\", line 44, in <module>\r\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\r\nValueError: could not broadcast input array from shape (2,2,3) into 
shape (1,2,3)\r\n```\r\n## Possible solution\r\nI was able to make it works for the same RGB image but this code is not adapted for BW and RGBA.\r\n\r\n```python\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage import data\r\nfrom skimage.transform import pyramid_gaussian\r\nimport imageio as io\r\n\r\nimage = io.imread('hand.jpg') # data.astronaut()\r\n\r\nrows, cols, dim = image.shape\r\npyramid = tuple(pyramid_gaussian(image, downscale=2, multichannel=True))\r\n\r\ncomposite_image = np.zeros((rows, cols + cols // 2, dim), dtype=np.double)\r\n\r\ncomposite_image[:rows, :cols, :] = pyramid[0]\r\n\r\ni_row = 0\r\nfor p in pyramid[1:]:\r\n n_rows, n_cols = p.shape[:2]\r\n # Check the dimension before assignement\r\n if(composite_image[i_row:i_row + n_rows, cols:cols + n_cols].shape==p.shape):\r\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\r\n i_row += n_rows\r\n else:\r\n break\r\n \r\nfig, ax = plt.subplots()\r\nax.imshow(composite_image)\r\nplt.show()\r\n```\r\n### Result\r\n![Figure_1](https://user-images.githubusercontent.com/28227183/69906188-d915fe80-13bf-11ea-8a07-7f9d1d354897.png)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n====================\nBuild image pyramids\n====================\n\nThe ``pyramid_gaussian`` function takes an image and yields successive images\nshrunk by a constant scale factor. Image pyramids are often used, e.g., to\nimplement algorithms for denoising, texture discrimination, and scale-invariant\ndetection.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.transform import pyramid_gaussian\n\n\nimage = data.astronaut()\nrows, cols, dim = image.shape\npyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1))\n\ncomposite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)\n\ncomposite_image[:rows, :cols, :] = pyramid[0]\n\ni_row = 0\nfor p in pyramid[1:]:\n n_rows, n_cols = p.shape[:2]\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\n i_row += n_rows\n\nfig, ax = plt.subplots()\nax.imshow(composite_image)\nplt.show()\n", "path": "doc/examples/transform/plot_pyramid.py"}]}
1685
451
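The accepted patch in the scikit-image record above fixes the broadcast error by sizing the composite canvas from the pyramid itself: the stacked downsampled layers can need more than `rows` rows whenever the height is not a power of two. A self-contained sketch of that sizing logic on a synthetic odd-height image (requires scikit-image; it is not the original gallery script):

```python
import numpy as np
from skimage.transform import pyramid_gaussian

image = np.random.rand(97, 120, 3)        # odd height triggers the original failure
rows, cols = image.shape[:2]
pyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1))

# Height of the stacked reduced layers may exceed `rows`, so take the max.
composite_rows = max(rows, sum(p.shape[0] for p in pyramid[1:]))
composite_cols = cols + pyramid[1].shape[1]
composite = np.zeros((composite_rows, composite_cols, 3), dtype=np.double)

composite[:rows, :cols] = pyramid[0]      # original on the left
i_row = 0
for p in pyramid[1:]:                     # downsampled layers stacked on the right
    n_rows, n_cols = p.shape[:2]
    composite[i_row:i_row + n_rows, cols:cols + n_cols] = p
    i_row += n_rows

print(composite.shape)                    # canvas is tall enough; no broadcast error
```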
gh_patches_debug_25326
rasdani/github-patches
git_diff
mlflow__mlflow-12224
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] uc_volume_dataset_source only validates file paths, not folder paths ### Issues Policy acknowledgement - [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) ### Where did you encounter this bug? Local machine ### Willingness to contribute Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community. ### MLflow version mlflow-2.12.2 ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: - **Python version**: - **yarn version, if running the dev UI**: ### Describe the problem https://github.com/mlflow/mlflow/blob/72df4a2a0f44c52179dfbdc7d47ad10f58ceec39/mlflow/data/uc_volume_dataset_source.py#L28 doesn't verify folder paths, only file paths ### Tracking information <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ```shell REPLACE_ME ``` ### Code to reproduce issue <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` REPLACE_ME ``` ### Stack trace <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` REPLACE_ME ``` ### Other info / logs <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` REPLACE_ME ``` ### What component(s) does this bug affect? - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/deployments`: MLflow Deployments client APIs, server, and third-party Deployments integrations - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs - [ ] `area/server-infra`: MLflow Tracking server backend - [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging ### What interface(s) does this bug affect? - [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support ### What language(s) does this bug affect? - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages ### What integration(s) does this bug affect? - [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations </issue> <code> [start of mlflow/data/uc_volume_dataset_source.py] 1 import logging 2 from typing import Any, Dict 3 4 from mlflow.data.dataset_source import DatasetSource 5 from mlflow.exceptions import MlflowException 6 7 _logger = logging.getLogger(__name__) 8 9 10 class UCVolumeDatasetSource(DatasetSource): 11 """Represents the source of a dataset stored in Databricks Unified Catalog Volume. 12 13 If you are using a delta table, please use `mlflow.data.delta_dataset_source.DeltaDatasetSource` 14 instead. 
This `UCVolumeDatasetSource` does not provide loading function, and is mostly useful 15 when you are logging a `mlflow.data.meta_dataset.MetaDataset` to MLflow, i.e., you want 16 to log the source of dataset to MLflow without loading the dataset. 17 18 Args: 19 path: the UC path of your data. It should be a valid UC path following the pattern 20 "/Volumes/{catalog}/{schema}/{volume}/{file_path}". For example, 21 "/Volumes/MyCatalog/MySchema/MyVolume/MyFile.json". 22 """ 23 24 def __init__(self, path: str): 25 self._verify_uc_path_is_valid(path) 26 self.path = path 27 28 def _verify_uc_path_is_valid(self, path): 29 """Verify if the path exists in Databricks Unified Catalog.""" 30 try: 31 from databricks.sdk import WorkspaceClient 32 33 w = WorkspaceClient() 34 except ImportError: 35 _logger.warning( 36 "Cannot verify the path of `UCVolumeDatasetSource` because of missing" 37 "`databricks-sdk`. Please install `databricks-sdk` via " 38 "`pip install -U databricks-sdk`. This does not block creating " 39 "`UCVolumeDatasetSource`, but your `UCVolumeDatasetSource` might be invalid." 40 ) 41 return 42 except Exception: 43 _logger.warning( 44 "Cannot verify the path of `UCVolumeDatasetSource` due to a connection failure " 45 "with Databricks workspace. Please run `mlflow.login()` to log in to Databricks. " 46 "This does not block creating `UCVolumeDatasetSource`, but your " 47 "`UCVolumeDatasetSource` might be invalid." 48 ) 49 return 50 51 try: 52 w.files.get_metadata(path) 53 except Exception: 54 raise MlflowException(f"{path} does not exist in Databricks Unified Catalog.") 55 56 @staticmethod 57 def _get_source_type() -> str: 58 return "uc_volume" 59 60 @staticmethod 61 def _can_resolve(raw_source: Any): 62 raise NotImplementedError 63 64 @classmethod 65 def _resolve(cls, raw_source: str): 66 raise NotImplementedError 67 68 def to_dict(self) -> Dict[Any, Any]: 69 return {"path": self.path} 70 71 @classmethod 72 def from_dict(cls, source_dict: Dict[Any, Any]) -> "UCVolumeDatasetSource": 73 return cls(**source_dict) 74 [end of mlflow/data/uc_volume_dataset_source.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlflow/data/uc_volume_dataset_source.py b/mlflow/data/uc_volume_dataset_source.py --- a/mlflow/data/uc_volume_dataset_source.py +++ b/mlflow/data/uc_volume_dataset_source.py @@ -22,10 +22,10 @@ """ def __init__(self, path: str): - self._verify_uc_path_is_valid(path) self.path = path + self._verify_uc_path_is_valid() - def _verify_uc_path_is_valid(self, path): + def _verify_uc_path_is_valid(self): """Verify if the path exists in Databricks Unified Catalog.""" try: from databricks.sdk import WorkspaceClient @@ -49,9 +49,17 @@ return try: - w.files.get_metadata(path) + # Check if `self.path` points to a valid UC file. + w.files.get_metadata(self.path) except Exception: - raise MlflowException(f"{path} does not exist in Databricks Unified Catalog.") + try: + # Check if `self.path` points to a valid UC directory. + w.files.get_directory_metadata(self.path) + # Append a slash to `self.path` to indicate it's a directory. + self.path += "/" if not self.path.endswith("/") else "" + except Exception: + # Neither file nor directory exists, we throw an exception. + raise MlflowException(f"{self.path} does not exist in Databricks Unified Catalog.") @staticmethod def _get_source_type() -> str:
{"golden_diff": "diff --git a/mlflow/data/uc_volume_dataset_source.py b/mlflow/data/uc_volume_dataset_source.py\n--- a/mlflow/data/uc_volume_dataset_source.py\n+++ b/mlflow/data/uc_volume_dataset_source.py\n@@ -22,10 +22,10 @@\n \"\"\"\n \n def __init__(self, path: str):\n- self._verify_uc_path_is_valid(path)\n self.path = path\n+ self._verify_uc_path_is_valid()\n \n- def _verify_uc_path_is_valid(self, path):\n+ def _verify_uc_path_is_valid(self):\n \"\"\"Verify if the path exists in Databricks Unified Catalog.\"\"\"\n try:\n from databricks.sdk import WorkspaceClient\n@@ -49,9 +49,17 @@\n return\n \n try:\n- w.files.get_metadata(path)\n+ # Check if `self.path` points to a valid UC file.\n+ w.files.get_metadata(self.path)\n except Exception:\n- raise MlflowException(f\"{path} does not exist in Databricks Unified Catalog.\")\n+ try:\n+ # Check if `self.path` points to a valid UC directory.\n+ w.files.get_directory_metadata(self.path)\n+ # Append a slash to `self.path` to indicate it's a directory.\n+ self.path += \"/\" if not self.path.endswith(\"/\") else \"\"\n+ except Exception:\n+ # Neither file nor directory exists, we throw an exception.\n+ raise MlflowException(f\"{self.path} does not exist in Databricks Unified Catalog.\")\n \n @staticmethod\n def _get_source_type() -> str:\n", "issue": "[BUG] uc_volume_dataset_source only validates file paths, not folder paths\n### Issues Policy acknowledgement\n\n- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\n\n### Where did you encounter this bug?\n\nLocal machine\n\n### Willingness to contribute\n\nYes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\n\n### MLflow version\n\nmlflow-2.12.2\n\n### System information\n\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Python version**:\r\n- **yarn version, if running the dev UI**:\r\n\n\n### Describe the problem\n\nhttps://github.com/mlflow/mlflow/blob/72df4a2a0f44c52179dfbdc7d47ad10f58ceec39/mlflow/data/uc_volume_dataset_source.py#L28 doesn't verify folder paths, only file paths\n\n### Tracking information\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```shell\r\nREPLACE_ME\r\n```\r\n\n\n### Code to reproduce issue\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### Stack trace\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### Other info / logs\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### What component(s) does this bug affect?\n\n- [ ] `area/artifacts`: Artifact stores and artifact logging\n- [ ] `area/build`: Build and test infrastructure for MLflow\n- [ ] `area/deployments`: MLflow Deployments client APIs, server, and third-party Deployments integrations\n- [ ] `area/docs`: MLflow documentation pages\n- [ ] `area/examples`: Example code\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\n- [ ] `area/projects`: MLproject format, project running backends\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\n- [ ] `area/server-infra`: MLflow Tracking server backend\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\n\n### What 
interface(s) does this bug affect?\n\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\n- [ ] `area/windows`: Windows support\n\n### What language(s) does this bug affect?\n\n- [ ] `language/r`: R APIs and clients\n- [ ] `language/java`: Java APIs and clients\n- [ ] `language/new`: Proposals for new client languages\n\n### What integration(s) does this bug affect?\n\n- [ ] `integrations/azure`: Azure and Azure ML integrations\n- [ ] `integrations/sagemaker`: SageMaker integrations\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom mlflow.data.dataset_source import DatasetSource\nfrom mlflow.exceptions import MlflowException\n\n_logger = logging.getLogger(__name__)\n\n\nclass UCVolumeDatasetSource(DatasetSource):\n \"\"\"Represents the source of a dataset stored in Databricks Unified Catalog Volume.\n\n If you are using a delta table, please use `mlflow.data.delta_dataset_source.DeltaDatasetSource`\n instead. This `UCVolumeDatasetSource` does not provide loading function, and is mostly useful\n when you are logging a `mlflow.data.meta_dataset.MetaDataset` to MLflow, i.e., you want\n to log the source of dataset to MLflow without loading the dataset.\n\n Args:\n path: the UC path of your data. It should be a valid UC path following the pattern\n \"/Volumes/{catalog}/{schema}/{volume}/{file_path}\". For example,\n \"/Volumes/MyCatalog/MySchema/MyVolume/MyFile.json\".\n \"\"\"\n\n def __init__(self, path: str):\n self._verify_uc_path_is_valid(path)\n self.path = path\n\n def _verify_uc_path_is_valid(self, path):\n \"\"\"Verify if the path exists in Databricks Unified Catalog.\"\"\"\n try:\n from databricks.sdk import WorkspaceClient\n\n w = WorkspaceClient()\n except ImportError:\n _logger.warning(\n \"Cannot verify the path of `UCVolumeDatasetSource` because of missing\"\n \"`databricks-sdk`. Please install `databricks-sdk` via \"\n \"`pip install -U databricks-sdk`. This does not block creating \"\n \"`UCVolumeDatasetSource`, but your `UCVolumeDatasetSource` might be invalid.\"\n )\n return\n except Exception:\n _logger.warning(\n \"Cannot verify the path of `UCVolumeDatasetSource` due to a connection failure \"\n \"with Databricks workspace. Please run `mlflow.login()` to log in to Databricks. \"\n \"This does not block creating `UCVolumeDatasetSource`, but your \"\n \"`UCVolumeDatasetSource` might be invalid.\"\n )\n return\n\n try:\n w.files.get_metadata(path)\n except Exception:\n raise MlflowException(f\"{path} does not exist in Databricks Unified Catalog.\")\n\n @staticmethod\n def _get_source_type() -> str:\n return \"uc_volume\"\n\n @staticmethod\n def _can_resolve(raw_source: Any):\n raise NotImplementedError\n\n @classmethod\n def _resolve(cls, raw_source: str):\n raise NotImplementedError\n\n def to_dict(self) -> Dict[Any, Any]:\n return {\"path\": self.path}\n\n @classmethod\n def from_dict(cls, source_dict: Dict[Any, Any]) -> \"UCVolumeDatasetSource\":\n return cls(**source_dict)\n", "path": "mlflow/data/uc_volume_dataset_source.py"}]}
2033
350
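The mlflow fix above validates a Unity Catalog path first as a file and, failing that, as a directory, normalizing directories to a trailing slash before giving up with an error. A condensed sketch of that control flow (the `files.get_metadata` / `files.get_directory_metadata` calls mirror the patch; the surrounding function and exception type are illustrative, not the mlflow source):

```python
class PathValidationError(Exception):
    """Raised when a path is neither a UC Volume file nor a directory."""


def verify_uc_path(client, path: str) -> str:
    """Return the validated path, with a trailing slash appended for directories.

    `client` stands in for a databricks-sdk WorkspaceClient.
    """
    try:
        client.files.get_metadata(path)                # valid file?
        return path
    except Exception:
        try:
            client.files.get_directory_metadata(path)  # valid directory?
            return path if path.endswith('/') else path + '/'
        except Exception:
            raise PathValidationError(
                f'{path} does not exist in Databricks Unified Catalog.')
```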
gh_patches_debug_24966
rasdani/github-patches
git_diff
chainer__chainer-2721
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> resuming issue of LinearShift Same issue as #2680 ``` import chainer from chainer import iterators from chainer import optimizers from chainer import training from chainer.training import extensions from chainer import serializers class DummyModel(chainer.Chain): def __call__(self, x): return x def setup_trainer(iteration): model = DummyModel() optimizer = optimizers.SGD() optimizer.setup(model) iterator = iterators.SerialIterator([1, 2, 3], 1) updater = training.StandardUpdater(iterator, optimizer) trainer = training.Trainer(updater, (iteration, 'iteration'), out='.') trainer.extend(extensions.LogReport(trigger=(1, 'iteration'))) trainer.extend(extensions.observe_lr(), trigger=(1, 'iteration')) trainer.extend( extensions.PrintReport(['iteration', 'lr']), trigger=(1, 'iteration')) trainer.extend( extensions.LinearShift('lr', (2, 1), (5, 15)), trigger=(1, 'iteration')) return trainer trainer = setup_trainer(10) trainer.run() serializers.save_npz('tmp', trainer) # iteration lr # 1 2 # 2 2 # 3 2 # 4 2 # 5 2 # 6 2 # 7 1.9 # 8 1.8 # 9 1.7 # 10 1.6 resumed_trainer = setup_trainer(20) serializers.load_npz('tmp', resumed_trainer) resumed_trainer.run() # iteration lr # 1 2 # 2 2 # 3 2 # 4 2 # 5 2 # 6 2 # 7 1.9 # 8 1.8 # 9 1.7 # 10 1.6 # 11 1.4 (lr = 1.5 is skipped) # 12 1.3 # 13 1.2 # 14 1.1 # 15 1 # 16 1 # 17 1 # 18 1 # 19 1 # 20 1 ``` </issue> <code> [start of chainer/training/extensions/linear_shift.py] 1 from __future__ import division 2 3 from chainer.training import extension 4 5 6 class LinearShift(extension.Extension): 7 8 """Trainer extension to change an optimizer attribute linearly. 9 10 This extension changes an optimizer attribute from the first value to the 11 last value linearly within a specified duration. The typical use case is 12 warming up of the momentum coefficient. 13 14 For example, suppose that this extension is called at every iteration, and 15 ``value_range == (x, y)`` and ``time_range == (i, j)``. Then, this 16 extension keeps the attribute to be ``x`` up to the ``i``-th iteration, 17 linearly shifts the value to ``y`` by the ``j``-th iteration, and then 18 keeps the value to be ``y`` after the ``j``-th iteration. 19 20 This extension is also called before the training loop starts by default. 21 22 Args: 23 attr (str): Name of the optimizer attribute to adjust. 24 value_range (tuple of float): The first and the last values of the 25 attribute. 26 time_range (tuple of ints): The first and last counts of calls in which 27 the attribute is adjusted. 28 optimizer (~chainer.Optimizer): Target optimizer object. If it is None, 29 the main optimizer of the trainer is used. 
30 31 """ 32 invoke_before_training = True 33 34 def __init__(self, attr, value_range, time_range, optimizer=None): 35 self._attr = attr 36 self._value_range = value_range 37 self._time_range = time_range 38 self._optimizer = optimizer 39 self._t = 0 40 41 def __call__(self, trainer): 42 optimizer = self._optimizer or trainer.updater.get_optimizer('main') 43 t1, t2 = self._time_range 44 v1, v2 = self._value_range 45 46 if self._t <= t1: 47 value = v1 48 elif self._t >= t2: 49 value = v2 50 else: 51 rate = (self._t - t1) / (t2 - t1) 52 value = v1 + rate * (v2 - v1) 53 setattr(optimizer, self._attr, value) 54 55 self._t += 1 56 57 def serialize(self, serializer): 58 self._t = serializer('_t', self._t) 59 [end of chainer/training/extensions/linear_shift.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/training/extensions/linear_shift.py b/chainer/training/extensions/linear_shift.py --- a/chainer/training/extensions/linear_shift.py +++ b/chainer/training/extensions/linear_shift.py @@ -36,23 +36,34 @@ self._value_range = value_range self._time_range = time_range self._optimizer = optimizer - self._t = 0 + self._t = 1 + self._before_training = True def __call__(self, trainer): optimizer = self._optimizer or trainer.updater.get_optimizer('main') + + if self._before_training: + self._before_training = False + value = self._compute_value(self._t - 1) + else: + value = self._compute_value(self._t) + self._t += 1 + + setattr(optimizer, self._attr, value) + + def serialize(self, serializer): + self._t = serializer('_t', self._t) + + def _compute_value(self, t): t1, t2 = self._time_range v1, v2 = self._value_range - if self._t <= t1: + if t <= t1: value = v1 - elif self._t >= t2: + elif t >= t2: value = v2 else: - rate = (self._t - t1) / (t2 - t1) + rate = (t - t1) / (t2 - t1) value = v1 + rate * (v2 - v1) - setattr(optimizer, self._attr, value) - self._t += 1 - - def serialize(self, serializer): - self._t = serializer('_t', self._t) + return value
{"golden_diff": "diff --git a/chainer/training/extensions/linear_shift.py b/chainer/training/extensions/linear_shift.py\n--- a/chainer/training/extensions/linear_shift.py\n+++ b/chainer/training/extensions/linear_shift.py\n@@ -36,23 +36,34 @@\n self._value_range = value_range\n self._time_range = time_range\n self._optimizer = optimizer\n- self._t = 0\n+ self._t = 1\n+ self._before_training = True\n \n def __call__(self, trainer):\n optimizer = self._optimizer or trainer.updater.get_optimizer('main')\n+\n+ if self._before_training:\n+ self._before_training = False\n+ value = self._compute_value(self._t - 1)\n+ else:\n+ value = self._compute_value(self._t)\n+ self._t += 1\n+\n+ setattr(optimizer, self._attr, value)\n+\n+ def serialize(self, serializer):\n+ self._t = serializer('_t', self._t)\n+\n+ def _compute_value(self, t):\n t1, t2 = self._time_range\n v1, v2 = self._value_range\n \n- if self._t <= t1:\n+ if t <= t1:\n value = v1\n- elif self._t >= t2:\n+ elif t >= t2:\n value = v2\n else:\n- rate = (self._t - t1) / (t2 - t1)\n+ rate = (t - t1) / (t2 - t1)\n value = v1 + rate * (v2 - v1)\n- setattr(optimizer, self._attr, value)\n \n- self._t += 1\n-\n- def serialize(self, serializer):\n- self._t = serializer('_t', self._t)\n+ return value\n", "issue": "resuming issue of LinearShift\nSame issue as #2680\r\n\r\n```\r\nimport chainer\r\nfrom chainer import iterators\r\nfrom chainer import optimizers\r\nfrom chainer import training\r\nfrom chainer.training import extensions\r\nfrom chainer import serializers\r\n\r\n\r\nclass DummyModel(chainer.Chain):\r\n\r\n def __call__(self, x):\r\n return x\r\n\r\n\r\ndef setup_trainer(iteration):\r\n model = DummyModel()\r\n optimizer = optimizers.SGD()\r\n optimizer.setup(model)\r\n\r\n iterator = iterators.SerialIterator([1, 2, 3], 1)\r\n\r\n updater = training.StandardUpdater(iterator, optimizer)\r\n trainer = training.Trainer(updater, (iteration, 'iteration'), out='.')\r\n\r\n trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))\r\n trainer.extend(extensions.observe_lr(), trigger=(1, 'iteration'))\r\n trainer.extend(\r\n extensions.PrintReport(['iteration', 'lr']),\r\n trigger=(1, 'iteration'))\r\n\r\n trainer.extend(\r\n extensions.LinearShift('lr', (2, 1), (5, 15)),\r\n trigger=(1, 'iteration'))\r\n\r\n return trainer\r\n\r\n\r\ntrainer = setup_trainer(10)\r\ntrainer.run()\r\nserializers.save_npz('tmp', trainer)\r\n# iteration lr\r\n# 1 2\r\n# 2 2\r\n# 3 2\r\n# 4 2\r\n# 5 2\r\n# 6 2\r\n# 7 1.9\r\n# 8 1.8\r\n# 9 1.7\r\n# 10 1.6\r\n\r\nresumed_trainer = setup_trainer(20)\r\nserializers.load_npz('tmp', resumed_trainer)\r\nresumed_trainer.run()\r\n# iteration lr\r\n# 1 2\r\n# 2 2\r\n# 3 2\r\n# 4 2\r\n# 5 2\r\n# 6 2\r\n# 7 1.9\r\n# 8 1.8\r\n# 9 1.7\r\n# 10 1.6\r\n# 11 1.4 (lr = 1.5 is skipped)\r\n# 12 1.3\r\n# 13 1.2\r\n# 14 1.1\r\n# 15 1\r\n# 16 1\r\n# 17 1\r\n# 18 1\r\n# 19 1\r\n# 20 1\r\n```\n", "before_files": [{"content": "from __future__ import division\n\nfrom chainer.training import extension\n\n\nclass LinearShift(extension.Extension):\n\n \"\"\"Trainer extension to change an optimizer attribute linearly.\n\n This extension changes an optimizer attribute from the first value to the\n last value linearly within a specified duration. The typical use case is\n warming up of the momentum coefficient.\n\n For example, suppose that this extension is called at every iteration, and\n ``value_range == (x, y)`` and ``time_range == (i, j)``. 
Then, this\n extension keeps the attribute to be ``x`` up to the ``i``-th iteration,\n linearly shifts the value to ``y`` by the ``j``-th iteration, and then\n keeps the value to be ``y`` after the ``j``-th iteration.\n\n This extension is also called before the training loop starts by default.\n\n Args:\n attr (str): Name of the optimizer attribute to adjust.\n value_range (tuple of float): The first and the last values of the\n attribute.\n time_range (tuple of ints): The first and last counts of calls in which\n the attribute is adjusted.\n optimizer (~chainer.Optimizer): Target optimizer object. If it is None,\n the main optimizer of the trainer is used.\n\n \"\"\"\n invoke_before_training = True\n\n def __init__(self, attr, value_range, time_range, optimizer=None):\n self._attr = attr\n self._value_range = value_range\n self._time_range = time_range\n self._optimizer = optimizer\n self._t = 0\n\n def __call__(self, trainer):\n optimizer = self._optimizer or trainer.updater.get_optimizer('main')\n t1, t2 = self._time_range\n v1, v2 = self._value_range\n\n if self._t <= t1:\n value = v1\n elif self._t >= t2:\n value = v2\n else:\n rate = (self._t - t1) / (t2 - t1)\n value = v1 + rate * (v2 - v1)\n setattr(optimizer, self._attr, value)\n\n self._t += 1\n\n def serialize(self, serializer):\n self._t = serializer('_t', self._t)\n", "path": "chainer/training/extensions/linear_shift.py"}]}
1719
420
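The chainer resume bug above comes from the extension mutating its counter as a side effect of being invoked (including the extra invoke-before-training call), so a reloaded trainer evaluates the schedule one step ahead and skips `lr = 1.5`. The patch makes the attribute a pure function of the serialized counter; a standalone sketch of that schedule function (illustrative names, not the chainer source):

```python
def linear_shift(t, value_range=(2.0, 1.0), time_range=(5, 15)):
    """Piecewise-linear schedule: flat, then interpolate, then flat."""
    t1, t2 = time_range
    v1, v2 = value_range
    if t <= t1:
        return v1
    if t >= t2:
        return v2
    rate = (t - t1) / (t2 - t1)
    return v1 + rate * (v2 - v1)


# Because the value depends only on t, serializing and restoring t reproduces
# the same sequence (... 1.6, 1.5, 1.4 ...) instead of jumping from 1.6 to 1.4.
print([round(linear_shift(t), 2) for t in range(1, 16)])
```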
gh_patches_debug_502
rasdani/github-patches
git_diff
google__flax-2827
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot import flax.training.checkpoints in 0.6.4 ### System information - OS Platform and Distribution: Ubuntu 22.04.1 LTS, also in Colab environment - Flax, jax, jaxlib versions: * flax 0.6.4 * jax 0.3.25 * jaxlib 0.3.25 - Python version: 3.10.6 - GPU/TPU model and memory: No Accelerator / 16GB ### Problem you have encountered: With FLAX v0.6.4 I can't import `flax.training.checkpoints` module due to following error: ``` ImportError: cannot import name 'monitoring' from 'jax' (/usr/local/lib/python3.8/dist-packages/jax/__init__.py) ``` This does not happen in v0.6.3. ### What you expected to happen: The module should be imported. ### Logs, error messages, etc: Error message from jupyter notebook: ``` ImportError Traceback (most recent call last) [<ipython-input-3-9a234296e658>](https://localhost:8080/#) in <module> 1 import flax ----> 2 from flax.training import checkpoints [/usr/local/lib/python3.8/dist-packages/flax/training/checkpoints.py](https://localhost:8080/#) in <module> 36 from flax import traverse_util 37 import jax ---> 38 from jax import monitoring 39 from jax import process_index 40 from jax import sharding ImportError: cannot import name 'monitoring' from 'jax' (/usr/local/lib/python3.8/dist-packages/jax/__init__.py) ``` ### Steps to reproduce: [Colab notebook](https://colab.research.google.com/drive/1ZLR1JSJPfaaoTmL7bow8oebqyhhxrqSo?usp=sharing) </issue> <code> [start of setup.py] 1 # Copyright 2022 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """setup.py for Flax.""" 16 17 import os 18 from setuptools import find_packages 19 from setuptools import setup 20 21 here = os.path.abspath(os.path.dirname(__file__)) 22 try: 23 README = open(os.path.join(here, "README.md"), encoding="utf-8").read() 24 except OSError: 25 README = "" 26 27 install_requires = [ 28 "numpy>=1.12", 29 "jax>=0.3.16", 30 "matplotlib", # only needed for tensorboard export 31 "msgpack", 32 "optax", 33 "orbax", 34 "tensorstore", 35 "rich>=11.1", 36 "typing_extensions>=4.1.1", 37 "PyYAML>=5.4.1", 38 ] 39 40 tests_require = [ 41 "atari-py==0.2.5", # Last version does not have the ROMs we test on pre-packaged 42 "clu", # All examples. 43 "gym==0.18.3", 44 "jaxlib", 45 "jraph>=0.0.6dev0", 46 "ml-collections", 47 "mypy", 48 "opencv-python", 49 "pytest", 50 "pytest-cov", 51 "pytest-custom_exit_code", 52 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate 53 "pytype", 54 "sentencepiece", # WMT example. 55 "tensorflow_text>=2.4.0", # WMT example. 
56 "tensorflow_datasets", 57 "tensorflow", 58 "torch", 59 ] 60 61 __version__ = None 62 63 with open("flax/version.py") as f: 64 exec(f.read(), globals()) 65 66 setup( 67 name="flax", 68 version=__version__, 69 description="Flax: A neural network library for JAX designed for flexibility", 70 long_description="\n\n".join([README]), 71 long_description_content_type="text/markdown", 72 classifiers=[ 73 "Development Status :: 3 - Alpha", 74 "Intended Audience :: Developers", 75 "Intended Audience :: Science/Research", 76 "License :: OSI Approved :: Apache Software License", 77 "Programming Language :: Python :: 3.7", 78 "Topic :: Scientific/Engineering :: Artificial Intelligence", 79 ], 80 keywords="", 81 author="Flax team", 82 author_email="[email protected]", 83 url="https://github.com/google/flax", 84 packages=find_packages(), 85 package_data={"flax": ["py.typed"]}, 86 zip_safe=False, 87 install_requires=install_requires, 88 extras_require={ 89 "testing": tests_require, 90 }, 91 ) 92 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ install_requires = [ "numpy>=1.12", - "jax>=0.3.16", + "jax>=0.4.2", "matplotlib", # only needed for tensorboard export "msgpack", "optax",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \n install_requires = [\n \"numpy>=1.12\",\n- \"jax>=0.3.16\",\n+ \"jax>=0.4.2\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n", "issue": "Cannot import flax.training.checkpoints in 0.6.4\n### System information\r\n- OS Platform and Distribution: Ubuntu 22.04.1 LTS, also in Colab environment\r\n- Flax, jax, jaxlib versions:\r\n * flax 0.6.4\r\n * jax 0.3.25\r\n * jaxlib 0.3.25\r\n- Python version: 3.10.6\r\n- GPU/TPU model and memory: No Accelerator / 16GB\r\n\r\n### Problem you have encountered:\r\nWith FLAX v0.6.4 I can't import `flax.training.checkpoints` module due to following error:\r\n```\r\nImportError: cannot import name 'monitoring' from 'jax' (/usr/local/lib/python3.8/dist-packages/jax/__init__.py)\r\n```\r\nThis does not happen in v0.6.3.\r\n\r\n### What you expected to happen:\r\nThe module should be imported.\r\n\r\n### Logs, error messages, etc:\r\nError message from jupyter notebook:\r\n```\r\nImportError Traceback (most recent call last)\r\n\r\n[<ipython-input-3-9a234296e658>](https://localhost:8080/#) in <module>\r\n 1 import flax\r\n----> 2 from flax.training import checkpoints\r\n\r\n[/usr/local/lib/python3.8/dist-packages/flax/training/checkpoints.py](https://localhost:8080/#) in <module>\r\n 36 from flax import traverse_util\r\n 37 import jax\r\n---> 38 from jax import monitoring\r\n 39 from jax import process_index\r\n 40 from jax import sharding\r\n\r\nImportError: cannot import name 'monitoring' from 'jax' (/usr/local/lib/python3.8/dist-packages/jax/__init__.py)\r\n```\r\n\r\n### Steps to reproduce:\r\n[Colab notebook](https://colab.research.google.com/drive/1ZLR1JSJPfaaoTmL7bow8oebqyhhxrqSo?usp=sharing)\r\n\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept OSError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.16\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"orbax\",\n \"tensorstore\",\n \"rich>=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph>=0.0.6dev0\",\n \"ml-collections\",\n \"mypy\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-custom_exit_code\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n]\n\n__version__ = None\n\nwith 
open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]}
1861
93
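The flax patch resolves the `ImportError` purely by raising the minimum supported jax version in `setup.py`, since `jax.monitoring` is only present in newer jax releases. For an environment that cannot upgrade immediately, a user-side guard along these lines fails early with a clearer message (hypothetical helper; assumes the `packaging` library is installed):

```python
from importlib.metadata import version
from packaging.version import Version

MIN_JAX = Version('0.4.2')  # the floor introduced by the fix

installed = Version(version('jax'))
if installed < MIN_JAX:
    raise RuntimeError(
        f'flax 0.6.4 needs jax>={MIN_JAX} for jax.monitoring, found {installed}; '
        'upgrade jax/jaxlib or pin flax==0.6.3'
    )

from flax.training import checkpoints  # safe once the constraint holds
```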
gh_patches_debug_14738
rasdani/github-patches
git_diff
crytic__slither-530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Suicidal detector fails on external functions If the [example](https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal) function for the suicidal detector is changed from `public` to `external` the issue is no longer flagged. ``` pragma solidity ^0.5.0; contract Suicidal{ function kill() external{ selfdestruct(msg.sender); } } ``` `slither --version`: 0.6.12 `solc --version`: 0.5.15 Suicidal detector fails on external functions If the [example](https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal) function for the suicidal detector is changed from `public` to `external` the issue is no longer flagged. ``` pragma solidity ^0.5.0; contract Suicidal{ function kill() external{ selfdestruct(msg.sender); } } ``` `slither --version`: 0.6.12 `solc --version`: 0.5.15 </issue> <code> [start of slither/detectors/functions/suicidal.py] 1 """ 2 Module detecting suicidal contract 3 4 A suicidal contract is an unprotected function that calls selfdestruct 5 """ 6 7 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification 8 9 10 class Suicidal(AbstractDetector): 11 """ 12 Unprotected function detector 13 """ 14 15 ARGUMENT = 'suicidal' 16 HELP = 'Functions allowing anyone to destruct the contract' 17 IMPACT = DetectorClassification.HIGH 18 CONFIDENCE = DetectorClassification.HIGH 19 20 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal' 21 22 23 WIKI_TITLE = 'Suicidal' 24 WIKI_DESCRIPTION = 'Unprotected call to a function executing `selfdestruct`/`suicide`.' 25 WIKI_EXPLOIT_SCENARIO = ''' 26 ```solidity 27 contract Suicidal{ 28 function kill() public{ 29 selfdestruct(msg.sender); 30 } 31 } 32 ``` 33 Bob calls `kill` and destructs the contract.''' 34 35 WIKI_RECOMMENDATION = 'Protect access to all sensitive functions.' 36 37 @staticmethod 38 def detect_suicidal_func(func): 39 """ Detect if the function is suicidal 40 41 Detect the public functions calling suicide/selfdestruct without protection 42 Returns: 43 (bool): True if the function is suicidal 44 """ 45 46 if func.is_constructor: 47 return False 48 49 if func.visibility != 'public': 50 return False 51 52 calls = [c.name for c in func.internal_calls] 53 if not ('suicide(address)' in calls or 'selfdestruct(address)' in calls): 54 return False 55 56 if func.is_protected(): 57 return False 58 59 return True 60 61 def detect_suicidal(self, contract): 62 ret = [] 63 for f in [f for f in contract.functions if f.contract_declarer == contract]: 64 if self.detect_suicidal_func(f): 65 ret.append(f) 66 return ret 67 68 def _detect(self): 69 """ Detect the suicidal functions 70 """ 71 results = [] 72 for c in self.contracts: 73 functions = self.detect_suicidal(c) 74 for func in functions: 75 76 info = [func, " allows anyone to destruct the contract\n"] 77 78 res = self.generate_result(info) 79 80 results.append(res) 81 82 return results 83 [end of slither/detectors/functions/suicidal.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/detectors/functions/suicidal.py b/slither/detectors/functions/suicidal.py --- a/slither/detectors/functions/suicidal.py +++ b/slither/detectors/functions/suicidal.py @@ -46,7 +46,7 @@ if func.is_constructor: return False - if func.visibility != 'public': + if func.visibility not in ['public', 'external']: return False calls = [c.name for c in func.internal_calls] @@ -60,7 +60,7 @@ def detect_suicidal(self, contract): ret = [] - for f in [f for f in contract.functions if f.contract_declarer == contract]: + for f in contract.functions_declared: if self.detect_suicidal_func(f): ret.append(f) return ret
{"golden_diff": "diff --git a/slither/detectors/functions/suicidal.py b/slither/detectors/functions/suicidal.py\n--- a/slither/detectors/functions/suicidal.py\n+++ b/slither/detectors/functions/suicidal.py\n@@ -46,7 +46,7 @@\n if func.is_constructor:\n return False\n \n- if func.visibility != 'public':\n+ if func.visibility not in ['public', 'external']:\n return False\n \n calls = [c.name for c in func.internal_calls]\n@@ -60,7 +60,7 @@\n \n def detect_suicidal(self, contract):\n ret = []\n- for f in [f for f in contract.functions if f.contract_declarer == contract]:\n+ for f in contract.functions_declared:\n if self.detect_suicidal_func(f):\n ret.append(f)\n return ret\n", "issue": "Suicidal detector fails on external functions\nIf the [example](https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal) function for the suicidal detector is changed from `public` to `external` the issue is no longer flagged.\r\n\r\n```\r\npragma solidity ^0.5.0;\r\ncontract Suicidal{\r\n function kill() external{\r\n selfdestruct(msg.sender);\r\n }\r\n}\r\n```\r\n\r\n`slither --version`: 0.6.12\r\n`solc --version`: 0.5.15\nSuicidal detector fails on external functions\nIf the [example](https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal) function for the suicidal detector is changed from `public` to `external` the issue is no longer flagged.\r\n\r\n```\r\npragma solidity ^0.5.0;\r\ncontract Suicidal{\r\n function kill() external{\r\n selfdestruct(msg.sender);\r\n }\r\n}\r\n```\r\n\r\n`slither --version`: 0.6.12\r\n`solc --version`: 0.5.15\n", "before_files": [{"content": "\"\"\"\nModule detecting suicidal contract\n\nA suicidal contract is an unprotected function that calls selfdestruct\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass Suicidal(AbstractDetector):\n \"\"\"\n Unprotected function detector\n \"\"\"\n\n ARGUMENT = 'suicidal'\n HELP = 'Functions allowing anyone to destruct the contract'\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#suicidal'\n\n\n WIKI_TITLE = 'Suicidal'\n WIKI_DESCRIPTION = 'Unprotected call to a function executing `selfdestruct`/`suicide`.'\n WIKI_EXPLOIT_SCENARIO = '''\n```solidity\ncontract Suicidal{\n function kill() public{\n selfdestruct(msg.sender);\n }\n}\n```\nBob calls `kill` and destructs the contract.'''\n\n WIKI_RECOMMENDATION = 'Protect access to all sensitive functions.'\n\n @staticmethod\n def detect_suicidal_func(func):\n \"\"\" Detect if the function is suicidal\n\n Detect the public functions calling suicide/selfdestruct without protection\n Returns:\n (bool): True if the function is suicidal\n \"\"\"\n\n if func.is_constructor:\n return False\n\n if func.visibility != 'public':\n return False\n\n calls = [c.name for c in func.internal_calls]\n if not ('suicide(address)' in calls or 'selfdestruct(address)' in calls):\n return False\n\n if func.is_protected():\n return False\n\n return True\n\n def detect_suicidal(self, contract):\n ret = []\n for f in [f for f in contract.functions if f.contract_declarer == contract]:\n if self.detect_suicidal_func(f):\n ret.append(f)\n return ret\n\n def _detect(self):\n \"\"\" Detect the suicidal functions\n \"\"\"\n results = []\n for c in self.contracts:\n functions = self.detect_suicidal(c)\n for func in functions:\n\n info = [func, \" allows anyone to destruct the contract\\n\"]\n\n res = self.generate_result(info)\n\n 
results.append(res)\n\n return results\n", "path": "slither/detectors/functions/suicidal.py"}]}
1427
195
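The one-line change that matters in the slither record above is widening the visibility check from `public` alone to `public` or `external`, since an `external` function is just as callable by an arbitrary account; the second hunk switches iteration to `functions_declared`. A compact sketch of the detection predicate after the fix (`func` stands in for a slither `Function` object; this is not the slither source verbatim):

```python
def is_suicidal(func) -> bool:
    """True if an externally reachable, unprotected function can selfdestruct."""
    if func.is_constructor:
        return False
    # 'external' functions were previously skipped, hiding `function kill() external`.
    if func.visibility not in ('public', 'external'):
        return False
    calls = {c.name for c in func.internal_calls}
    if not calls & {'suicide(address)', 'selfdestruct(address)'}:
        return False
    return not func.is_protected()
```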
gh_patches_debug_57650
rasdani/github-patches
git_diff
facebookresearch__ParlAI-1956
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Quickstart AttributeError: 'HogwildWorld' object has no attribute 'acts' **Bug description** When going through the ParlAI [quickstart](https://parl.ai/docs/tutorial_quick.html#install), I got the following error: ``` python Traceback (most recent call last): File "examples/interactive.py", line 18, in <module> interactive(opt, print_parser=parser) File "/root/ParlAI/parlai/scripts/interactive.py", line 68, in interactive agent = create_agent(opt, requireModelExists=True) File "/root/ParlAI/parlai/core/agents.py", line 683, in create_agent model = load_agent_module(opt) File "/root/ParlAI/parlai/core/agents.py", line 548, in load_agent_module return model_class(new_opt) File "/root/ParlAI/parlai/agents/memnn/memnn.py", line 86, in __init__ super().__init__(opt, shared) File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 135, in __init__ super().__init__(opt, shared) File "/root/ParlAI/parlai/core/torch_agent.py", line 737, in __init__ self.set_interactive_mode(opt['interactive_mode'], shared) File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 206, in set_interactive_mode path = self.get_task_candidates_path() File "/root/ParlAI/parlai/core/torch_ranker_agent.py", line 230, in get_task_candidates_path build_cands(opt) File "/root/ParlAI/parlai/scripts/build_candidates.py", line 47, in build_cands acts = world.get_acts()[0] File "/root/ParlAI/parlai/core/worlds.py", line 162, in get_acts return self.acts AttributeError: 'HogwildWorld' object has no attribute 'acts' ``` **While running** ```python python examples/interactive.py -mf /tmp/babi_memnn -ecands vocab ``` </issue> <code> [start of parlai/scripts/build_candidates.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 """Build the candidate responses for a retrieval model. 7 8 Examples 9 -------- 10 11 .. code-block:: shell 12 13 python build_candidates.py -t convai2 --outfile /tmp/cands.txt 14 """ 15 16 from parlai.core.params import ParlaiParser 17 from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent 18 from parlai.core.worlds import create_task 19 from parlai.core.utils import TimeLogger 20 import random 21 import tempfile 22 23 24 def build_cands(opt): 25 # create repeat label agent and assign it to the specified task 26 agent = RepeatLabelAgent(opt) 27 world = create_task(opt, agent) 28 if opt['outfile'] is None: 29 outfile = tempfile.mkstemp( 30 prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt' 31 )[1] 32 else: 33 outfile = opt['outfile'] 34 35 if opt.get('num_examples', -1) == -1: 36 num_examples = world.num_examples() 37 else: 38 num_examples = opt['num_examples'] 39 log_timer = TimeLogger() 40 41 print('[ starting to build candidates from task.. (ex:' + str(num_examples) + ')]') 42 print('[ saving output to {} ]'.format(outfile)) 43 cands = [] 44 for _ in range(num_examples): 45 world.parley() 46 # We get the acts of the first agent, which is the teacher. 47 acts = world.get_acts()[0] 48 if isinstance(acts, dict): 49 # We turn into a batch of 1 example, in case batching is being used. 
50 acts = [acts] 51 for a in acts: 52 candidate = a.get('labels', a.get('eval_labels', None)) 53 if candidate is not None: 54 candidate = candidate[0] 55 cands.append(candidate) 56 if log_timer.time() > opt['log_every_n_secs']: 57 text, _log = log_timer.log(world.total_parleys, world.num_examples()) 58 print(text) 59 if world.epoch_done(): 60 print('EPOCH DONE') 61 break 62 fw = open(outfile, 'w') 63 fw.write('\n'.join(cands)) 64 fw.close() 65 66 67 def main(): 68 random.seed(42) 69 # Get command line arguments 70 parser = ParlaiParser() 71 parser.add_argument( 72 '-n', 73 '--num-examples', 74 default=-1, 75 type=int, 76 help='Total number of exs to convert, -1 to convert all examples', 77 ) 78 parser.add_argument( 79 '-of', 80 '--outfile', 81 default=None, 82 type=str, 83 help='Output file where to save, by default will be created in /tmp', 84 ) 85 parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2) 86 parser.set_defaults(datatype='train:evalmode') 87 opt = parser.parse_args() 88 build_cands(opt) 89 90 91 if __name__ == '__main__': 92 main() 93 [end of parlai/scripts/build_candidates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parlai/scripts/build_candidates.py b/parlai/scripts/build_candidates.py --- a/parlai/scripts/build_candidates.py +++ b/parlai/scripts/build_candidates.py @@ -23,6 +23,9 @@ def build_cands(opt): # create repeat label agent and assign it to the specified task + if opt['numthreads'] > 1: + # Broken in hogwild mode. Just fall back to single processing mode + opt['numthreads'] = 1 agent = RepeatLabelAgent(opt) world = create_task(opt, agent) if opt['outfile'] is None:
{"golden_diff": "diff --git a/parlai/scripts/build_candidates.py b/parlai/scripts/build_candidates.py\n--- a/parlai/scripts/build_candidates.py\n+++ b/parlai/scripts/build_candidates.py\n@@ -23,6 +23,9 @@\n \n def build_cands(opt):\n # create repeat label agent and assign it to the specified task\n+ if opt['numthreads'] > 1:\n+ # Broken in hogwild mode. Just fall back to single processing mode\n+ opt['numthreads'] = 1\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n if opt['outfile'] is None:\n", "issue": "Quickstart AttributeError: 'HogwildWorld' object has no attribute 'acts'\n**Bug description**\r\nWhen going through the ParlAI [quickstart](https://parl.ai/docs/tutorial_quick.html#install), I got the following error:\r\n\r\n``` python\r\nTraceback (most recent call last):\r\n File \"examples/interactive.py\", line 18, in <module>\r\n interactive(opt, print_parser=parser)\r\n File \"/root/ParlAI/parlai/scripts/interactive.py\", line 68, in interactive\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 683, in create_agent\r\n model = load_agent_module(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 548, in load_agent_module\r\n return model_class(new_opt)\r\n File \"/root/ParlAI/parlai/agents/memnn/memnn.py\", line 86, in __init__\r\n super().__init__(opt, shared)\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 135, in __init__\r\n super().__init__(opt, shared)\r\n File \"/root/ParlAI/parlai/core/torch_agent.py\", line 737, in __init__\r\n self.set_interactive_mode(opt['interactive_mode'], shared)\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 206, in set_interactive_mode\r\n path = self.get_task_candidates_path()\r\n File \"/root/ParlAI/parlai/core/torch_ranker_agent.py\", line 230, in get_task_candidates_path\r\n build_cands(opt)\r\n File \"/root/ParlAI/parlai/scripts/build_candidates.py\", line 47, in build_cands\r\n acts = world.get_acts()[0]\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 162, in get_acts\r\n return self.acts\r\nAttributeError: 'HogwildWorld' object has no attribute 'acts'\r\n```\r\n\r\n**While running**\r\n```python\r\npython examples/interactive.py -mf /tmp/babi_memnn -ecands vocab\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Build the candidate responses for a retrieval model.\n\nExamples\n--------\n\n.. code-block:: shell\n\n python build_candidates.py -t convai2 --outfile /tmp/cands.txt\n\"\"\"\n\nfrom parlai.core.params import ParlaiParser\nfrom parlai.agents.repeat_label.repeat_label import RepeatLabelAgent\nfrom parlai.core.worlds import create_task\nfrom parlai.core.utils import TimeLogger\nimport random\nimport tempfile\n\n\ndef build_cands(opt):\n # create repeat label agent and assign it to the specified task\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n if opt['outfile'] is None:\n outfile = tempfile.mkstemp(\n prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'\n )[1]\n else:\n outfile = opt['outfile']\n\n if opt.get('num_examples', -1) == -1:\n num_examples = world.num_examples()\n else:\n num_examples = opt['num_examples']\n log_timer = TimeLogger()\n\n print('[ starting to build candidates from task.. 
(ex:' + str(num_examples) + ')]')\n print('[ saving output to {} ]'.format(outfile))\n cands = []\n for _ in range(num_examples):\n world.parley()\n # We get the acts of the first agent, which is the teacher.\n acts = world.get_acts()[0]\n if isinstance(acts, dict):\n # We turn into a batch of 1 example, in case batching is being used.\n acts = [acts]\n for a in acts:\n candidate = a.get('labels', a.get('eval_labels', None))\n if candidate is not None:\n candidate = candidate[0]\n cands.append(candidate)\n if log_timer.time() > opt['log_every_n_secs']:\n text, _log = log_timer.log(world.total_parleys, world.num_examples())\n print(text)\n if world.epoch_done():\n print('EPOCH DONE')\n break\n fw = open(outfile, 'w')\n fw.write('\\n'.join(cands))\n fw.close()\n\n\ndef main():\n random.seed(42)\n # Get command line arguments\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-examples',\n default=-1,\n type=int,\n help='Total number of exs to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n parser.set_defaults(datatype='train:evalmode')\n opt = parser.parse_args()\n build_cands(opt)\n\n\nif __name__ == '__main__':\n main()\n", "path": "parlai/scripts/build_candidates.py"}]}
1,896
144
gh_patches_debug_20846
rasdani/github-patches
git_diff
wagtail__wagtail-1147
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wagtail doesn't gracefully support session invalidation on password change According to [Django's documentation](https://docs.djangoproject.com/en/1.7/topics/auth/default/#session-invalidation-on-password-change), SessionAuthenticationMiddleware is new in Django 1.7, enabled by default, and will be mandatory in Django 2.0. Currently, when the middleware is loaded and the user changes their password, they are immediately kicked out to the sign in screen. The user's session is most likely invalidated. This is very obtrusive and the user is not informed if their password was successfully updated. I believe the offending code is in [account.py](https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/views/account.py#L26) and attempted to modify the code from the example to make it work, but the outcome was the same: ``` python # ... from django.contrib.auth import update_session_auth_hash # new code # ... def change_password(request): can_change_password = request.user.has_usable_password() if can_change_password: if request.POST: form = SetPasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) # new code messages.success(request, _("Your password has been changed successfully!")) return redirect('wagtailadmin_account') else: form = SetPasswordForm(request.user) else: form = None return render(request, 'wagtailadmin/account/change_password.html', { 'form': form, 'can_change_password': can_change_password, }) ``` I am, currently, a Django novice, so that's as far as I was able to get. Hope this is an easy fix! </issue> <code> [start of wagtail/wagtailadmin/views/account.py] 1 from django.conf import settings 2 from django.shortcuts import render, redirect 3 from django.contrib import messages 4 from django.contrib.auth.forms import SetPasswordForm 5 from django.contrib.auth.views import logout as auth_logout, login as auth_login 6 from django.utils.translation import ugettext as _ 7 from django.views.decorators.debug import sensitive_post_parameters 8 from django.views.decorators.cache import never_cache 9 10 from wagtail.wagtailadmin import forms 11 from wagtail.wagtailusers.forms import NotificationPreferencesForm 12 from wagtail.wagtailusers.models import UserProfile 13 from wagtail.wagtailcore.models import UserPagePermissionsProxy 14 15 16 def account(request): 17 user_perms = UserPagePermissionsProxy(request.user) 18 show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages() 19 20 return render(request, 'wagtailadmin/account/account.html', { 21 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(), 22 'show_notification_preferences': show_notification_preferences 23 }) 24 25 26 def change_password(request): 27 can_change_password = request.user.has_usable_password() 28 29 if can_change_password: 30 if request.POST: 31 form = SetPasswordForm(request.user, request.POST) 32 33 if form.is_valid(): 34 form.save() 35 36 messages.success(request, _("Your password has been changed successfully!")) 37 return redirect('wagtailadmin_account') 38 else: 39 form = SetPasswordForm(request.user) 40 else: 41 form = None 42 43 return render(request, 'wagtailadmin/account/change_password.html', { 44 'form': form, 45 'can_change_password': can_change_password, 46 }) 47 48 49 def notification_preferences(request): 50 51 
if request.POST: 52 form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user)) 53 54 if form.is_valid(): 55 form.save() 56 messages.success(request, _("Your preferences have been updated successfully!")) 57 return redirect('wagtailadmin_account') 58 else: 59 form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user)) 60 61 # quick-and-dirty catch-all in case the form has been rendered with no 62 # fields, as the user has no customisable permissions 63 if not form.fields: 64 return redirect('wagtailadmin_account') 65 66 return render(request, 'wagtailadmin/account/notification_preferences.html', { 67 'form': form, 68 }) 69 70 71 @sensitive_post_parameters() 72 @never_cache 73 def login(request): 74 if request.user.is_authenticated() and request.user.has_perm('wagtailadmin.access_admin'): 75 return redirect('wagtailadmin_home') 76 else: 77 from django.contrib.auth import get_user_model 78 return auth_login(request, 79 template_name='wagtailadmin/login.html', 80 authentication_form=forms.LoginForm, 81 extra_context={ 82 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True), 83 'username_field': get_user_model().USERNAME_FIELD, 84 }, 85 ) 86 87 88 def logout(request): 89 response = auth_logout(request, next_page='wagtailadmin_login') 90 91 # By default, logging out will generate a fresh sessionid cookie. We want to use the 92 # absence of sessionid as an indication that front-end pages are being viewed by a 93 # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here. 94 response.delete_cookie(settings.SESSION_COOKIE_NAME, 95 domain=settings.SESSION_COOKIE_DOMAIN, 96 path=settings.SESSION_COOKIE_PATH) 97 98 # HACK: pretend that the session hasn't been modified, so that SessionMiddleware 99 # won't override the above and write a new cookie. 100 request.session.modified = False 101 102 return response 103 [end of wagtail/wagtailadmin/views/account.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py --- a/wagtail/wagtailadmin/views/account.py +++ b/wagtail/wagtailadmin/views/account.py @@ -3,6 +3,7 @@ from django.contrib import messages from django.contrib.auth.forms import SetPasswordForm from django.contrib.auth.views import logout as auth_logout, login as auth_login +from django.contrib.auth import update_session_auth_hash from django.utils.translation import ugettext as _ from django.views.decorators.debug import sensitive_post_parameters from django.views.decorators.cache import never_cache @@ -32,6 +33,7 @@ if form.is_valid(): form.save() + update_session_auth_hash(request, form.user) messages.success(request, _("Your password has been changed successfully!")) return redirect('wagtailadmin_account')
{"golden_diff": "diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py\n--- a/wagtail/wagtailadmin/views/account.py\n+++ b/wagtail/wagtailadmin/views/account.py\n@@ -3,6 +3,7 @@\n from django.contrib import messages\n from django.contrib.auth.forms import SetPasswordForm\n from django.contrib.auth.views import logout as auth_logout, login as auth_login\n+from django.contrib.auth import update_session_auth_hash\n from django.utils.translation import ugettext as _ \n from django.views.decorators.debug import sensitive_post_parameters\n from django.views.decorators.cache import never_cache\n@@ -32,6 +33,7 @@\n \n if form.is_valid():\n form.save()\n+ update_session_auth_hash(request, form.user)\n \n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n", "issue": "Wagtail doesn't gracefully support session invalidation on password change\nAccording to [Django's documentation](https://docs.djangoproject.com/en/1.7/topics/auth/default/#session-invalidation-on-password-change), SessionAuthenticationMiddleware is new in Django 1.7, enabled by default, and will be mandatory in Django 2.0.\n\nCurrently, when the middleware is loaded and the user changes their password, they are immediately kicked out to the sign in screen. The user's session is most likely invalidated. This is very obtrusive and the user is not informed if their password was successfully updated. I believe the offending code is in\n[account.py](https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/views/account.py#L26) and attempted to modify the code from the example to make it work, but the outcome was the same:\n\n``` python\n# ...\nfrom django.contrib.auth import update_session_auth_hash # new code\n# ...\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user) # new code\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n```\n\nI am, currently, a Django novice, so that's as far as I was able to get. 
Hope this is an easy fix!\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.views import logout as auth_logout, login as auth_login\nfrom django.utils.translation import ugettext as _ \nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.cache import never_cache\n\nfrom wagtail.wagtailadmin import forms\nfrom wagtail.wagtailusers.forms import NotificationPreferencesForm\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import UserPagePermissionsProxy\n\n\ndef account(request):\n user_perms = UserPagePermissionsProxy(request.user)\n show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages()\n\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n 'show_notification_preferences': show_notification_preferences\n })\n\n\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n\n\ndef notification_preferences(request):\n\n if request.POST:\n form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user))\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Your preferences have been updated successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user))\n\n # quick-and-dirty catch-all in case the form has been rendered with no\n # fields, as the user has no customisable permissions\n if not form.fields:\n return redirect('wagtailadmin_account')\n\n return render(request, 'wagtailadmin/account/notification_preferences.html', {\n 'form': form,\n })\n\n\n@sensitive_post_parameters()\n@never_cache\ndef login(request):\n if request.user.is_authenticated() and request.user.has_perm('wagtailadmin.access_admin'):\n return redirect('wagtailadmin_home')\n else:\n from django.contrib.auth import get_user_model\n return auth_login(request,\n template_name='wagtailadmin/login.html',\n authentication_form=forms.LoginForm,\n extra_context={\n 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),\n 'username_field': get_user_model().USERNAME_FIELD,\n },\n )\n\n\ndef logout(request):\n response = auth_logout(request, next_page='wagtailadmin_login')\n\n # By default, logging out will generate a fresh sessionid cookie. 
We want to use the\n # absence of sessionid as an indication that front-end pages are being viewed by a\n # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.\n response.delete_cookie(settings.SESSION_COOKIE_NAME,\n domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH)\n\n # HACK: pretend that the session hasn't been modified, so that SessionMiddleware\n # won't override the above and write a new cookie.\n request.session.modified = False\n\n return response\n", "path": "wagtail/wagtailadmin/views/account.py"}]}
1,903
194
gh_patches_debug_28162
rasdani/github-patches
git_diff
Qiskit__qiskit-12069
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation of RVGate is incorrect ### Environment N/A ### What is happening? Received this in an email: >Hi, I think I found some errors in the Qiskit documentation at <https://docs.quantum.ibm.com/api/qiskit/qiskit.circuit.library.RVGate> and I'm contacting you because you look like the two people who most recently edited the source file at <https://github.com/Qiskit/qiskit/blob/stable/0.46/qiskit/circuit/library/generalized_gates/rv.py> The matrix representation given in the documentation seems to be wrong. I compared it to the definition given in <https://arxiv.org/pdf/2104.14875.pdf> on page 4, equation 1, we see the definition of the rotation matrix. It almost matches the definition given in the documentation at <https://docs.quantum.ibm.com/api/qiskit/qiskit.circuit.library.RVGate> except for two mistakes: the "sinc" function should be "sin", and the angle should be divided by two. This can be compared to the source code at <https://github.com/Qiskit/qiskit/blob/stable/0.46/qiskit/circuit/library/generalized_gates/rv.py> at lines 86 and 87, where we see the angle divided by two, and we see the use of the sin and cos functions. ### How can we reproduce the issue? N/A ### What should happen? N/A ### Any suggestions? _No response_ </issue> <code> [start of qiskit/circuit/library/generalized_gates/rv.py] 1 # This code is part of Qiskit. 2 # 3 # (C) Copyright IBM 2017, 2020 4 # 5 # This code is licensed under the Apache License, Version 2.0. You may 6 # obtain a copy of this license in the LICENSE.txt file in the root directory 7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 # 9 # Any modifications or derivative works of this code must retain this 10 # copyright notice, and modified files need to carry a notice indicating 11 # that they have been altered from the originals. 12 13 """Rotation around an arbitrary axis on the Bloch sphere.""" 14 15 import numpy 16 from qiskit.circuit.gate import Gate 17 from qiskit.circuit.exceptions import CircuitError 18 19 20 class RVGate(Gate): 21 r"""Rotation around arbitrary rotation axis :math:`v` where :math:`|v|` is 22 angle of rotation in radians. 23 24 Can be applied to a :class:`~qiskit.circuit.QuantumCircuit` 25 with the :meth:`~qiskit.circuit.QuantumCircuit.rv` method. 26 27 **Circuit symbol:** 28 29 .. parsed-literal:: 30 31 ┌─────────────────┐ 32 q_0: ┤ RV(v_x,v_y,v_z) ├ 33 └─────────────────┘ 34 35 **Matrix Representation:** 36 37 .. math:: 38 39 \newcommand{\rotationangle}{|\vec{v}|} 40 \newcommand{\sinc}{\text{sinc}} 41 R(\vec{v}) = e^{-i \vec{v}\cdot\vec{\sigma}} = 42 \begin{pmatrix} 43 \cos\left(\rotationangle\right) -i v_z \sinc\left(\rotationangle\right) 44 & -(i v_x + v_y) \sinc\left(\rotationangle\right) \\ 45 -(i v_x - v_y) \sinc\left(\rotationangle\right) 46 & \cos\left(\rotationangle\right) + i v_z \sinc\left(\rotationangle\right) 47 \end{pmatrix} 48 """ 49 50 def __init__(self, v_x, v_y, v_z, basis="U"): 51 """Create new rv single-qubit gate. 
52 53 Args: 54 v_x (float): x-component 55 v_y (float): y-component 56 v_z (float): z-component 57 basis (str, optional): basis (see 58 :class:`~qiskit.synthesis.one_qubit.one_qubit_decompose.OneQubitEulerDecomposer`) 59 """ 60 # pylint: disable=cyclic-import 61 from qiskit.synthesis.one_qubit.one_qubit_decompose import OneQubitEulerDecomposer 62 63 super().__init__("rv", 1, [v_x, v_y, v_z]) 64 self._decomposer = OneQubitEulerDecomposer(basis=basis) 65 66 def _define(self): 67 try: 68 self.definition = self._decomposer(self.to_matrix()) 69 except TypeError as ex: 70 raise CircuitError( 71 f"The {self.name} gate cannot be decomposed with unbound parameters" 72 ) from ex 73 74 def inverse(self): 75 """Invert this gate.""" 76 vx, vy, vz = self.params 77 return RVGate(-vx, -vy, -vz) 78 79 def to_matrix(self): 80 """Return a numpy.array for the R(v) gate.""" 81 v = numpy.asarray(self.params, dtype=float) 82 angle = numpy.sqrt(v.dot(v)) 83 if angle == 0: 84 return numpy.array([[1, 0], [0, 1]]) 85 nx, ny, nz = v / angle 86 sin = numpy.sin(angle / 2) 87 cos = numpy.cos(angle / 2) 88 return numpy.array( 89 [ 90 [cos - 1j * nz * sin, (-ny - 1j * nx) * sin], 91 [(ny - 1j * nx) * sin, cos + 1j * nz * sin], 92 ] 93 ) 94 [end of qiskit/circuit/library/generalized_gates/rv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qiskit/circuit/library/generalized_gates/rv.py b/qiskit/circuit/library/generalized_gates/rv.py --- a/qiskit/circuit/library/generalized_gates/rv.py +++ b/qiskit/circuit/library/generalized_gates/rv.py @@ -18,7 +18,7 @@ class RVGate(Gate): - r"""Rotation around arbitrary rotation axis :math:`v` where :math:`|v|` is + r"""Rotation around arbitrary rotation axis :math:`\vec{v}` where :math:`\|\vec{v}\|_2` is angle of rotation in radians. Can be applied to a :class:`~qiskit.circuit.QuantumCircuit` @@ -36,14 +36,17 @@ .. math:: - \newcommand{\rotationangle}{|\vec{v}|} - \newcommand{\sinc}{\text{sinc}} - R(\vec{v}) = e^{-i \vec{v}\cdot\vec{\sigma}} = + \newcommand{\rotationangle}{\frac{\|\vec{v}\|_2}{2}} + R(\vec{v}) = e^{-i \vec{v}\cdot\vec{\sigma} / 2} = \begin{pmatrix} - \cos\left(\rotationangle\right) -i v_z \sinc\left(\rotationangle\right) - & -(i v_x + v_y) \sinc\left(\rotationangle\right) \\ - -(i v_x - v_y) \sinc\left(\rotationangle\right) - & \cos\left(\rotationangle\right) + i v_z \sinc\left(\rotationangle\right) + \cos\left(\rotationangle\right) + -i \frac{v_z}{\|\vec{v}\|_2} \sin\left(\rotationangle\right) + & -(i \frac{v_x}{\|\vec{v}\|_2} + + \frac{v_y}{\|\vec{v}\|_2}) \sin\left(\rotationangle\right) \\ + -(i \frac{v_x}{\|\vec{v}\|_2} + - \frac{v_y}{\|\vec{v}\|_2}) \sin\left(\rotationangle\right) + & \cos\left(\rotationangle\right) + + i \frac{v_z}{\|\vec{v}\|_2} \sin\left(\rotationangle\right) \end{pmatrix} """
{"golden_diff": "diff --git a/qiskit/circuit/library/generalized_gates/rv.py b/qiskit/circuit/library/generalized_gates/rv.py\n--- a/qiskit/circuit/library/generalized_gates/rv.py\n+++ b/qiskit/circuit/library/generalized_gates/rv.py\n@@ -18,7 +18,7 @@\n \n \n class RVGate(Gate):\n- r\"\"\"Rotation around arbitrary rotation axis :math:`v` where :math:`|v|` is\n+ r\"\"\"Rotation around arbitrary rotation axis :math:`\\vec{v}` where :math:`\\|\\vec{v}\\|_2` is\n angle of rotation in radians.\n \n Can be applied to a :class:`~qiskit.circuit.QuantumCircuit`\n@@ -36,14 +36,17 @@\n \n .. math::\n \n- \\newcommand{\\rotationangle}{|\\vec{v}|}\n- \\newcommand{\\sinc}{\\text{sinc}}\n- R(\\vec{v}) = e^{-i \\vec{v}\\cdot\\vec{\\sigma}} =\n+ \\newcommand{\\rotationangle}{\\frac{\\|\\vec{v}\\|_2}{2}}\n+ R(\\vec{v}) = e^{-i \\vec{v}\\cdot\\vec{\\sigma} / 2} =\n \\begin{pmatrix}\n- \\cos\\left(\\rotationangle\\right) -i v_z \\sinc\\left(\\rotationangle\\right)\n- & -(i v_x + v_y) \\sinc\\left(\\rotationangle\\right) \\\\\n- -(i v_x - v_y) \\sinc\\left(\\rotationangle\\right)\n- & \\cos\\left(\\rotationangle\\right) + i v_z \\sinc\\left(\\rotationangle\\right)\n+ \\cos\\left(\\rotationangle\\right)\n+ -i \\frac{v_z}{\\|\\vec{v}\\|_2} \\sin\\left(\\rotationangle\\right)\n+ & -(i \\frac{v_x}{\\|\\vec{v}\\|_2}\n+ + \\frac{v_y}{\\|\\vec{v}\\|_2}) \\sin\\left(\\rotationangle\\right) \\\\\n+ -(i \\frac{v_x}{\\|\\vec{v}\\|_2}\n+ - \\frac{v_y}{\\|\\vec{v}\\|_2}) \\sin\\left(\\rotationangle\\right)\n+ & \\cos\\left(\\rotationangle\\right)\n+ + i \\frac{v_z}{\\|\\vec{v}\\|_2} \\sin\\left(\\rotationangle\\right)\n \\end{pmatrix}\n \"\"\"\n", "issue": "Documentation of RVGate is incorrect\n### Environment\n\nN/A\n\n### What is happening?\n\nReceived this in an email:\r\n>Hi, I think I found some errors in the Qiskit documentation at\r\n<https://docs.quantum.ibm.com/api/qiskit/qiskit.circuit.library.RVGate>\r\nand I'm contacting you because you look like the two people who most recently edited the source file at\r\n<https://github.com/Qiskit/qiskit/blob/stable/0.46/qiskit/circuit/library/generalized_gates/rv.py>\r\nThe matrix representation given in the documentation seems to be wrong. I compared it to the definition given in\r\n<https://arxiv.org/pdf/2104.14875.pdf>\r\non page 4, equation 1, we see the definition of the rotation matrix. It almost matches the definition given in the documentation at\r\n<https://docs.quantum.ibm.com/api/qiskit/qiskit.circuit.library.RVGate>\r\nexcept for two mistakes: the \"sinc\" function should be \"sin\", and the angle should be divided by two. This can be compared to the source code at\r\n<https://github.com/Qiskit/qiskit/blob/stable/0.46/qiskit/circuit/library/generalized_gates/rv.py>\r\nat lines 86 and 87, where we see the angle divided by two, and we see the use of the sin and cos functions.\n\n### How can we reproduce the issue?\n\nN/A\n\n### What should happen?\n\nN/A\n\n### Any suggestions?\n\n_No response_\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Rotation around an arbitrary axis on the Bloch sphere.\"\"\"\n\nimport numpy\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.exceptions import CircuitError\n\n\nclass RVGate(Gate):\n r\"\"\"Rotation around arbitrary rotation axis :math:`v` where :math:`|v|` is\n angle of rotation in radians.\n\n Can be applied to a :class:`~qiskit.circuit.QuantumCircuit`\n with the :meth:`~qiskit.circuit.QuantumCircuit.rv` method.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n q_0: \u2524 RV(v_x,v_y,v_z) \u251c\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n\n **Matrix Representation:**\n\n .. math::\n\n \\newcommand{\\rotationangle}{|\\vec{v}|}\n \\newcommand{\\sinc}{\\text{sinc}}\n R(\\vec{v}) = e^{-i \\vec{v}\\cdot\\vec{\\sigma}} =\n \\begin{pmatrix}\n \\cos\\left(\\rotationangle\\right) -i v_z \\sinc\\left(\\rotationangle\\right)\n & -(i v_x + v_y) \\sinc\\left(\\rotationangle\\right) \\\\\n -(i v_x - v_y) \\sinc\\left(\\rotationangle\\right)\n & \\cos\\left(\\rotationangle\\right) + i v_z \\sinc\\left(\\rotationangle\\right)\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, v_x, v_y, v_z, basis=\"U\"):\n \"\"\"Create new rv single-qubit gate.\n\n Args:\n v_x (float): x-component\n v_y (float): y-component\n v_z (float): z-component\n basis (str, optional): basis (see\n :class:`~qiskit.synthesis.one_qubit.one_qubit_decompose.OneQubitEulerDecomposer`)\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.synthesis.one_qubit.one_qubit_decompose import OneQubitEulerDecomposer\n\n super().__init__(\"rv\", 1, [v_x, v_y, v_z])\n self._decomposer = OneQubitEulerDecomposer(basis=basis)\n\n def _define(self):\n try:\n self.definition = self._decomposer(self.to_matrix())\n except TypeError as ex:\n raise CircuitError(\n f\"The {self.name} gate cannot be decomposed with unbound parameters\"\n ) from ex\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n vx, vy, vz = self.params\n return RVGate(-vx, -vy, -vz)\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the R(v) gate.\"\"\"\n v = numpy.asarray(self.params, dtype=float)\n angle = numpy.sqrt(v.dot(v))\n if angle == 0:\n return numpy.array([[1, 0], [0, 1]])\n nx, ny, nz = v / angle\n sin = numpy.sin(angle / 2)\n cos = numpy.cos(angle / 2)\n return numpy.array(\n [\n [cos - 1j * nz * sin, (-ny - 1j * nx) * sin],\n [(ny - 1j * nx) * sin, cos + 1j * nz * sin],\n ]\n )\n", "path": "qiskit/circuit/library/generalized_gates/rv.py"}]}
1,952
593
gh_patches_debug_8362
rasdani/github-patches
git_diff
getnikola__nikola-3036
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RSS_PATH doesn't work as advertised (is path and filename, excluding .xml) * Python Version: 3.5.3 * Nikola Version: v7.8.14 * Operating System: Debian A fresh config says: ``` # Final location for the blog main RSS feed is: # output / TRANSLATION[lang] / RSS_PATH / rss.xml ``` which is in line with other `_PATH` variables. But it seems `RSS_PATH` is actually path+filename (and `.xml` is appended). With `RSS_PATH = "blog/`I get `render_taxonomies:output/blog/.xml` (instead of `blog/rss.xml`) With `RSS_PATH = blog/index.xml` I get `render_taxonomies:output/blog/index.xml.xml` </issue> <code> [start of nikola/plugins/task/indexes.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2018 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Render the blog's main index.""" 28 29 30 from nikola.plugin_categories import Taxonomy 31 32 33 class Indexes(Taxonomy): 34 """Classify for the blog's main index.""" 35 36 name = "classify_indexes" 37 38 classification_name = "index" 39 overview_page_variable_name = None 40 more_than_one_classifications_per_post = False 41 has_hierarchy = False 42 show_list_as_index = True 43 template_for_single_list = "index.tmpl" 44 template_for_classification_overview = None 45 apply_to_posts = True 46 apply_to_pages = False 47 omit_empty_classifications = False 48 path_handler_docstrings = { 49 'index_index': False, 50 'index': """Link to a numbered index. 51 52 Example: 53 54 link://index/3 => /index-3.html""", 55 'index_atom': """Link to a numbered Atom index. 56 57 Example: 58 59 link://index_atom/3 => /index-3.atom""", 60 'index_rss': """A link to the RSS feed path. 61 62 Example: 63 64 link://rss => /blog/rss.xml""", 65 } 66 67 def set_site(self, site): 68 """Set Nikola site.""" 69 # Redirect automatically generated 'index_rss' path handler to 'rss' for compatibility with old rss plugin 70 site.register_path_handler('rss', lambda name, lang: site.path_handlers['index_rss'](name, lang)) 71 site.path_handlers['rss'].__doc__ = """A link to the RSS feed path. 
72 73 Example: 74 75 link://rss => /blog/rss.xml 76 """.strip() 77 return super(Indexes, self).set_site(site) 78 79 def get_implicit_classifications(self, lang): 80 """Return a list of classification strings which should always appear in posts_per_classification.""" 81 return [""] 82 83 def classify(self, post, lang): 84 """Classify the given post for the given language.""" 85 return [""] 86 87 def get_classification_friendly_name(self, classification, lang, only_last_component=False): 88 """Extract a friendly name from the classification.""" 89 return self.site.config["BLOG_TITLE"](lang) 90 91 def get_path(self, classification, lang, dest_type='page'): 92 """Return a path for the given classification.""" 93 if dest_type == 'rss': 94 return [self.site.config['RSS_PATH'](lang)], True 95 # 'page' (index) or 'feed' (Atom) 96 page_number = None 97 if dest_type == 'page': 98 # Interpret argument as page number 99 try: 100 page_number = int(classification) 101 except (ValueError, TypeError): 102 pass 103 return [self.site.config['INDEX_PATH'](lang)], 'always', page_number 104 105 def provide_context_and_uptodate(self, classification, lang, node=None): 106 """Provide data for the context and the uptodate list for the list of the given classifiation.""" 107 kw = { 108 } 109 context = { 110 "title": self.site.config["INDEXES_TITLE"](lang) or self.site.config["BLOG_TITLE"](lang), 111 "description": self.site.config["BLOG_DESCRIPTION"](lang), 112 "pagekind": ["main_index", "index"], 113 } 114 kw.update(context) 115 return context, kw 116 117 def should_generate_classification_page(self, classification, post_list, lang): 118 """Only generates list of posts for classification if this function returns True.""" 119 return not self.site.config["DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED"] 120 121 def should_generate_rss_for_classification_page(self, classification, post_list, lang): 122 """Only generates RSS feed for list of posts for classification if this function returns True.""" 123 return not self.site.config["DISABLE_INDEXES_PLUGIN_RSS_FEED"] 124 [end of nikola/plugins/task/indexes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/task/indexes.py b/nikola/plugins/task/indexes.py --- a/nikola/plugins/task/indexes.py +++ b/nikola/plugins/task/indexes.py @@ -91,7 +91,7 @@ def get_path(self, classification, lang, dest_type='page'): """Return a path for the given classification.""" if dest_type == 'rss': - return [self.site.config['RSS_PATH'](lang)], True + return [self.site.config['RSS_PATH'](lang), 'rss'], 'auto' # 'page' (index) or 'feed' (Atom) page_number = None if dest_type == 'page':
{"golden_diff": "diff --git a/nikola/plugins/task/indexes.py b/nikola/plugins/task/indexes.py\n--- a/nikola/plugins/task/indexes.py\n+++ b/nikola/plugins/task/indexes.py\n@@ -91,7 +91,7 @@\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"Return a path for the given classification.\"\"\"\n if dest_type == 'rss':\n- return [self.site.config['RSS_PATH'](lang)], True\n+ return [self.site.config['RSS_PATH'](lang), 'rss'], 'auto'\n # 'page' (index) or 'feed' (Atom)\n page_number = None\n if dest_type == 'page':\n", "issue": "RSS_PATH doesn't work as advertised (is path and filename, excluding .xml)\n* Python Version: 3.5.3\r\n* Nikola Version: v7.8.14\r\n* Operating System: Debian\r\n\r\nA fresh config says:\r\n\r\n```\r\n# Final location for the blog main RSS feed is:\r\n# output / TRANSLATION[lang] / RSS_PATH / rss.xml\r\n```\r\n\r\nwhich is in line with other `_PATH` variables.\r\n\r\nBut it seems `RSS_PATH` is actually path+filename (and `.xml` is appended).\r\n\r\nWith `RSS_PATH = \"blog/`I get `render_taxonomies:output/blog/.xml` (instead of `blog/rss.xml`)\r\n\r\nWith `RSS_PATH = blog/index.xml` I get `render_taxonomies:output/blog/index.xml.xml`\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the blog's main index.\"\"\"\n\n\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Indexes(Taxonomy):\n \"\"\"Classify for the blog's main index.\"\"\"\n\n name = \"classify_indexes\"\n\n classification_name = \"index\"\n overview_page_variable_name = None\n more_than_one_classifications_per_post = False\n has_hierarchy = False\n show_list_as_index = True\n template_for_single_list = \"index.tmpl\"\n template_for_classification_overview = None\n apply_to_posts = True\n apply_to_pages = False\n omit_empty_classifications = False\n path_handler_docstrings = {\n 'index_index': False,\n 'index': \"\"\"Link to a numbered index.\n\nExample:\n\nlink://index/3 => /index-3.html\"\"\",\n 'index_atom': \"\"\"Link to a numbered Atom index.\n\nExample:\n\nlink://index_atom/3 => /index-3.atom\"\"\",\n 'index_rss': \"\"\"A link to the RSS feed path.\n\nExample:\n\nlink://rss => /blog/rss.xml\"\"\",\n }\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Redirect automatically generated 'index_rss' path handler to 'rss' for compatibility with old rss plugin\n site.register_path_handler('rss', lambda name, lang: site.path_handlers['index_rss'](name, lang))\n site.path_handlers['rss'].__doc__ = \"\"\"A link to the RSS feed path.\n\nExample:\n\n link://rss => /blog/rss.xml\n \"\"\".strip()\n return super(Indexes, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return [\"\"]\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n return [\"\"]\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n return self.site.config[\"BLOG_TITLE\"](lang)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"Return a path for the given classification.\"\"\"\n if dest_type == 'rss':\n return [self.site.config['RSS_PATH'](lang)], True\n # 'page' (index) or 'feed' (Atom)\n page_number = None\n if dest_type == 'page':\n # Interpret argument as page number\n try:\n page_number = int(classification)\n except (ValueError, TypeError):\n pass\n return [self.site.config['INDEX_PATH'](lang)], 'always', page_number\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n kw = {\n }\n context = {\n \"title\": self.site.config[\"INDEXES_TITLE\"](lang) or self.site.config[\"BLOG_TITLE\"](lang),\n \"description\": self.site.config[\"BLOG_DESCRIPTION\"](lang),\n \"pagekind\": [\"main_index\", \"index\"],\n }\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return not self.site.config[\"DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED\"]\n\n def should_generate_rss_for_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates RSS feed for list of posts for classification if this function returns True.\"\"\"\n return not 
self.site.config[\"DISABLE_INDEXES_PLUGIN_RSS_FEED\"]\n", "path": "nikola/plugins/task/indexes.py"}]}
2,014
153
gh_patches_debug_416
rasdani/github-patches
git_diff
automl__auto-sklearn-1361
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Check if test requirement `flaky` can be removed We currently have a test dependancy [flaky](https://pypi.org/project/flaky/) used to annotate a test `KernelPCAComponentTest::test_default_configuration_classify()`. This is the only place it's used. </issue> <code> [start of setup.py] 1 # -*- encoding: utf-8 -*- 2 import os 3 import sys 4 from setuptools import setup, find_packages 5 6 7 # Check if Auto-sklearn *could* run on the given system 8 if os.name != 'posix': 9 raise ValueError( 10 'Detected unsupported operating system: %s. Please check ' 11 'the compability information of auto-sklearn: https://automl.github.io' 12 '/auto-sklearn/master/installation.html#windows-osx-compatibility' % 13 sys.platform 14 ) 15 16 if sys.version_info < (3, 7): 17 raise ValueError( 18 'Unsupported Python version %d.%d.%d found. Auto-sklearn requires Python ' 19 '3.7 or higher.' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro) 20 ) 21 22 HERE = os.path.abspath(os.path.dirname(__file__)) 23 with open(os.path.join(HERE, 'requirements.txt')) as fp: 24 install_reqs = [r.rstrip() for r in fp.readlines() 25 if not r.startswith('#') and not r.startswith('git+')] 26 27 extras_reqs={ 28 "test": [ 29 "pytest>=4.6", 30 "mypy", 31 "pytest-xdist", 32 "pytest-timeout", 33 "flaky", 34 "openml", 35 "pre-commit", 36 "pytest-cov", 37 ], 38 "examples": [ 39 "matplotlib", 40 "jupyter", 41 "notebook", 42 "seaborn", 43 ], 44 "docs": [ 45 "sphinx<4.3", 46 "sphinx-gallery", 47 "sphinx_bootstrap_theme", 48 "numpydoc", 49 "sphinx_toolbox", 50 "docutils==0.16" 51 ], 52 } 53 54 with open(os.path.join(HERE, 'autosklearn', '__version__.py')) as fh: 55 version = fh.readlines()[-1].split()[-1].strip("\"'") 56 57 58 with open(os.path.join(HERE, 'README.md')) as fh: 59 long_description = fh.read() 60 61 62 setup( 63 name='auto-sklearn', 64 author='Matthias Feurer', 65 author_email='[email protected]', 66 description='Automated machine learning.', 67 long_description=long_description, 68 long_description_content_type='text/markdown', 69 version=version, 70 packages=find_packages(exclude=['test', 'scripts', 'examples']), 71 extras_require=extras_reqs, 72 install_requires=install_reqs, 73 include_package_data=True, 74 license='BSD3', 75 platforms=['Linux'], 76 classifiers=[ 77 "Environment :: Console", 78 "Intended Audience :: Developers", 79 "Intended Audience :: Education", 80 "Intended Audience :: Science/Research", 81 "Intended Audience :: Information Technology", 82 "License :: OSI Approved :: BSD License", 83 "Natural Language :: English", 84 "Operating System :: OS Independent", 85 "Topic :: Scientific/Engineering :: Artificial Intelligence", 86 "Topic :: Scientific/Engineering :: Information Analysis", 87 'Programming Language :: Python :: 3.7', 88 'Programming Language :: Python :: 3.8', 89 'Programming Language :: Python :: 3.9', 90 ], 91 python_requires='>=3.7', 92 url='https://automl.github.io/auto-sklearn', 93 ) 94 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,7 +30,6 @@ "mypy", "pytest-xdist", "pytest-timeout", - "flaky", "openml", "pre-commit", "pytest-cov",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,6 @@\n \"mypy\",\n \"pytest-xdist\",\n \"pytest-timeout\",\n- \"flaky\",\n \"openml\",\n \"pre-commit\",\n \"pytest-cov\",\n", "issue": "Check if test requirement `flaky` can be removed\nWe currently have a test dependancy [flaky](https://pypi.org/project/flaky/) used to annotate a test `KernelPCAComponentTest::test_default_configuration_classify()`. This is the only place it's used.\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\n\n# Check if Auto-sklearn *could* run on the given system\nif os.name != 'posix':\n raise ValueError(\n 'Detected unsupported operating system: %s. Please check '\n 'the compability information of auto-sklearn: https://automl.github.io'\n '/auto-sklearn/master/installation.html#windows-osx-compatibility' %\n sys.platform\n )\n\nif sys.version_info < (3, 7):\n raise ValueError(\n 'Unsupported Python version %d.%d.%d found. Auto-sklearn requires Python '\n '3.7 or higher.' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n )\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(HERE, 'requirements.txt')) as fp:\n install_reqs = [r.rstrip() for r in fp.readlines()\n if not r.startswith('#') and not r.startswith('git+')]\n\nextras_reqs={\n \"test\": [\n \"pytest>=4.6\",\n \"mypy\",\n \"pytest-xdist\",\n \"pytest-timeout\",\n \"flaky\",\n \"openml\",\n \"pre-commit\",\n \"pytest-cov\",\n ],\n \"examples\": [\n \"matplotlib\",\n \"jupyter\",\n \"notebook\",\n \"seaborn\",\n ],\n \"docs\": [\n \"sphinx<4.3\",\n \"sphinx-gallery\",\n \"sphinx_bootstrap_theme\",\n \"numpydoc\",\n \"sphinx_toolbox\",\n \"docutils==0.16\"\n ],\n}\n\nwith open(os.path.join(HERE, 'autosklearn', '__version__.py')) as fh:\n version = fh.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nwith open(os.path.join(HERE, 'README.md')) as fh:\n long_description = fh.read()\n\n\nsetup(\n name='auto-sklearn',\n author='Matthias Feurer',\n author_email='[email protected]',\n description='Automated machine learning.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version=version,\n packages=find_packages(exclude=['test', 'scripts', 'examples']),\n extras_require=extras_reqs,\n install_requires=install_reqs,\n include_package_data=True,\n license='BSD3',\n platforms=['Linux'],\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires='>=3.7',\n url='https://automl.github.io/auto-sklearn',\n)\n", "path": "setup.py"}]}
1,460
72
gh_patches_debug_5141
rasdani/github-patches
git_diff
scrapy__scrapy-2503
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> scrapy view <url> raise exc in v1.3.0 ```` (py35) wingyiu@mbp101:~$scrapy view http://www.scrapy.org 2017-01-19 22:13:54 [scrapy.utils.log] INFO: Scrapy 1.3.0 started (bot: scrapybot) 2017-01-19 22:13:54 [scrapy.utils.log] INFO: Overridden settings: {} Traceback (most recent call last): File "/Users/user/venv/py35/bin/scrapy", line 11, in <module> sys.exit(execute()) File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 142, in execute _run_print_help(parser, _run_command, cmd, args, opts) File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 88, in _run_print_help func(*a, **kw) File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 149, in _run_command cmd.run(args, opts) File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/commands/fetch.py", line 58, in run if not opts.no_redirect: AttributeError: 'Values' object has no attribute 'no_redirect' ```` </issue> <code> [start of scrapy/commands/view.py] 1 from scrapy.commands import fetch, ScrapyCommand 2 from scrapy.utils.response import open_in_browser 3 4 class Command(fetch.Command): 5 6 def short_desc(self): 7 return "Open URL in browser, as seen by Scrapy" 8 9 def long_desc(self): 10 return "Fetch a URL using the Scrapy downloader and show its " \ 11 "contents in a browser" 12 13 def add_options(self, parser): 14 ScrapyCommand.add_options(self, parser) 15 parser.add_option("--spider", dest="spider", 16 help="use this spider") 17 18 def _print_response(self, response, opts): 19 open_in_browser(response) 20 [end of scrapy/commands/view.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/commands/view.py b/scrapy/commands/view.py --- a/scrapy/commands/view.py +++ b/scrapy/commands/view.py @@ -11,9 +11,8 @@ "contents in a browser" def add_options(self, parser): - ScrapyCommand.add_options(self, parser) - parser.add_option("--spider", dest="spider", - help="use this spider") + super(Command, self).add_options(parser) + parser.remove_option("--headers") def _print_response(self, response, opts): open_in_browser(response)
{"golden_diff": "diff --git a/scrapy/commands/view.py b/scrapy/commands/view.py\n--- a/scrapy/commands/view.py\n+++ b/scrapy/commands/view.py\n@@ -11,9 +11,8 @@\n \"contents in a browser\"\n \n def add_options(self, parser):\n- ScrapyCommand.add_options(self, parser)\n- parser.add_option(\"--spider\", dest=\"spider\",\n- help=\"use this spider\")\n+ super(Command, self).add_options(parser)\n+ parser.remove_option(\"--headers\")\n \n def _print_response(self, response, opts):\n open_in_browser(response)\n", "issue": "scrapy view <url> raise exc in v1.3.0\n````\r\n(py35) wingyiu@mbp101:~$scrapy view http://www.scrapy.org\r\n2017-01-19 22:13:54 [scrapy.utils.log] INFO: Scrapy 1.3.0 started (bot: scrapybot)\r\n2017-01-19 22:13:54 [scrapy.utils.log] INFO: Overridden settings: {}\r\nTraceback (most recent call last):\r\n File \"/Users/user/venv/py35/bin/scrapy\", line 11, in <module>\r\n sys.exit(execute())\r\n File \"/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py\", line 142, in execute\r\n _run_print_help(parser, _run_command, cmd, args, opts)\r\n File \"/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py\", line 88, in _run_print_help\r\n func(*a, **kw)\r\n File \"/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py\", line 149, in _run_command\r\n cmd.run(args, opts)\r\n File \"/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/commands/fetch.py\", line 58, in run\r\n if not opts.no_redirect:\r\nAttributeError: 'Values' object has no attribute 'no_redirect'\r\n````\r\n\n", "before_files": [{"content": "from scrapy.commands import fetch, ScrapyCommand\nfrom scrapy.utils.response import open_in_browser\n\nclass Command(fetch.Command):\n\n def short_desc(self):\n return \"Open URL in browser, as seen by Scrapy\"\n\n def long_desc(self):\n return \"Fetch a URL using the Scrapy downloader and show its \" \\\n \"contents in a browser\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"--spider\", dest=\"spider\",\n help=\"use this spider\")\n\n def _print_response(self, response, opts):\n open_in_browser(response)\n", "path": "scrapy/commands/view.py"}]}
1,051
135
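The scrapy__scrapy-2503 record above hinges on a subclassing pitfall: `view.Command` rebuilt its options from the base `ScrapyCommand` instead of extending its parent `fetch.Command`, so `--no-redirect` never reached the parser and `opts.no_redirect` raised `AttributeError`. The sketch below follows the record's golden diff and shows the repaired command; it assumes a Scrapy installation that provides the imported modules.

```python
from scrapy.commands import fetch
from scrapy.utils.response import open_in_browser


class Command(fetch.Command):

    def short_desc(self):
        return "Open URL in browser, as seen by Scrapy"

    def add_options(self, parser):
        # Take every option the parent fetch command defines (--spider,
        # --no-redirect, ...) and only drop the one that makes no sense
        # when the response is opened in a browser.
        super(Command, self).add_options(parser)
        parser.remove_option("--headers")

    def _print_response(self, response, opts):
        open_in_browser(response)
```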
gh_patches_debug_15924
rasdani/github-patches
git_diff
Kinto__kinto-119
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Using the _since querystring filter has no effect I've tried using the `_since` querystring filter as explained in the tutorial, but it seems to have no effect. `GET`ing any of those urls returns the exact same list (the full list of records) ``` http GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=1436094288171 -v --auth 'user:password' http GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=foobar -v --auth 'user:password' http GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=`date +%s` -v --auth 'user:password' ``` The last one uses the current timestamp as the value, which means it should return an empty list. </issue> <code> [start of kinto/views/buckets.py] 1 from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed 2 from pyramid.security import NO_PERMISSION_REQUIRED 3 from pyramid.view import view_config 4 5 from cliquet import resource 6 from cliquet.utils import hmac_digest, build_request 7 8 from kinto.views import NameGenerator 9 10 11 def create_bucket(request, bucket_id): 12 """Create a bucket if it doesn't exists.""" 13 bucket_put = (request.method.lower() == 'put' and 14 request.path.endswith('buckets/default')) 15 16 if not bucket_put: 17 subrequest = build_request(request, { 18 'method': 'PUT', 19 'path': '/buckets/%s' % bucket_id, 20 'body': {"data": {}}, 21 'headers': {'If-None-Match': '*'.encode('utf-8')} 22 }) 23 24 try: 25 request.invoke_subrequest(subrequest) 26 except HTTPPreconditionFailed: 27 # The bucket already exists 28 pass 29 30 31 def create_collection(request, bucket_id): 32 subpath = request.matchdict['subpath'] 33 if subpath.startswith('/collections/'): 34 collection_id = subpath.split('/')[2] 35 collection_put = (request.method.lower() == 'put' and 36 request.path.endswith(collection_id)) 37 if not collection_put: 38 subrequest = build_request(request, { 39 'method': 'PUT', 40 'path': '/buckets/%s/collections/%s' % ( 41 bucket_id, collection_id), 42 'body': {"data": {}}, 43 'headers': {'If-None-Match': '*'.encode('utf-8')} 44 }) 45 try: 46 request.invoke_subrequest(subrequest) 47 except HTTPPreconditionFailed: 48 # The collection already exists 49 pass 50 51 52 @view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED) 53 def default_bucket(request): 54 if getattr(request, 'prefixed_userid', None) is None: 55 raise HTTPForbidden # Pass through the forbidden_view_config 56 57 settings = request.registry.settings 58 hmac_secret = settings['cliquet.userid_hmac_secret'] 59 # Build the user unguessable bucket_id UUID from its user_id 60 bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32] 61 path = request.path.replace('default', bucket_id) 62 63 # Make sure bucket exists 64 create_bucket(request, bucket_id) 65 66 # Make sure the collection exists 67 create_collection(request, bucket_id) 68 69 subrequest = build_request(request, { 70 'method': request.method, 71 'path': path, 72 'body': request.body 73 }) 74 75 return request.invoke_subrequest(subrequest) 76 77 78 @resource.register(name='bucket', 79 collection_methods=('GET',), 80 collection_path='/buckets', 81 record_path='/buckets/{{id}}') 82 class Bucket(resource.ProtectedResource): 83 permissions = ('read', 'write', 'collection:create', 'group:create') 84 85 def __init__(self, *args, **kwargs): 86 super(Bucket, self).__init__(*args, **kwargs) 87 
self.collection.id_generator = NameGenerator() 88 89 def get_parent_id(self, request): 90 # Buckets are not isolated by user, unlike Cliquet resources. 91 return '' 92 93 def delete(self): 94 result = super(Bucket, self).delete() 95 96 # Delete groups. 97 storage = self.collection.storage 98 parent_id = '/buckets/%s' % self.record_id 99 storage.delete_all(collection_id='group', parent_id=parent_id) 100 101 # Delete collections. 102 deleted = storage.delete_all(collection_id='collection', 103 parent_id=parent_id) 104 105 # Delete records. 106 id_field = self.collection.id_field 107 for collection in deleted: 108 parent_id = '/buckets/%s/collections/%s' % (self.record_id, 109 collection[id_field]) 110 storage.delete_all(collection_id='record', parent_id=parent_id) 111 112 return result 113 [end of kinto/views/buckets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py --- a/kinto/views/buckets.py +++ b/kinto/views/buckets.py @@ -59,6 +59,8 @@ # Build the user unguessable bucket_id UUID from its user_id bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32] path = request.path.replace('default', bucket_id) + querystring = request.url[(request.url.index(request.path) + + len(request.path)):] # Make sure bucket exists create_bucket(request, bucket_id) @@ -68,7 +70,7 @@ subrequest = build_request(request, { 'method': request.method, - 'path': path, + 'path': path + querystring, 'body': request.body })
{"golden_diff": "diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py\n--- a/kinto/views/buckets.py\n+++ b/kinto/views/buckets.py\n@@ -59,6 +59,8 @@\n # Build the user unguessable bucket_id UUID from its user_id\n bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]\n path = request.path.replace('default', bucket_id)\n+ querystring = request.url[(request.url.index(request.path) +\n+ len(request.path)):]\n \n # Make sure bucket exists\n create_bucket(request, bucket_id)\n@@ -68,7 +70,7 @@\n \n subrequest = build_request(request, {\n 'method': request.method,\n- 'path': path,\n+ 'path': path + querystring,\n 'body': request.body\n })\n", "issue": "Using the _since querystring filter has no effect\nI've tried using the `_since` querystring filter as explained in the tutorial, but it seems to have no effect.\n\n`GET`ing any of those urls returns the exact same list (the full list of records)\n\n```\nhttp GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=1436094288171 -v --auth 'user:password'\nhttp GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=foobar -v --auth 'user:password'\nhttp GET http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records?_since=`date +%s` -v --auth 'user:password'\n```\n\nThe last one uses the current timestamp as the value, which means it should return an empty list.\n\n", "before_files": [{"content": "from pyramid.httpexceptions import HTTPForbidden, HTTPPreconditionFailed\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict['subpath']\n if subpath.startswith('/collections/'):\n collection_id = subpath.split('/')[2]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n bucket_id = hmac_digest(hmac_secret, request.prefixed_userid)[:32]\n path = request.path.replace('default', bucket_id)\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, 
{\n 'method': request.method,\n 'path': path,\n 'body': request.body\n })\n\n return request.invoke_subrequest(subrequest)\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group', parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/buckets.py"}]}
1,806
188
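In the Kinto__kinto-119 record the `_since` filter is silently dropped because `default_bucket` rebuilds its sub-request from `request.path` alone, and the golden diff fixes that by re-attaching the original querystring. The helper below uses the editor's own name and signature, written only to make the string handling runnable on its own; inside Kinto the same slice is applied to `request.url` before calling `build_request`.

```python
def path_with_querystring(url, path, bucket_id):
    """Rewrite the 'default' bucket path while keeping any querystring.

    `url` is the full request URL, `path` the matched request path and
    `bucket_id` the per-user hashed bucket id (hypothetical helper, for
    illustration only).
    """
    new_path = path.replace('default', bucket_id)
    # Everything that follows the matched path (e.g. "?_since=...") has to
    # be carried over, otherwise filters never reach the storage backend.
    querystring = url[url.index(path) + len(path):]
    return new_path + querystring


print(path_with_querystring(
    'http://0.0.0.0:8888/v1/buckets/default/collections/tasks/records'
    '?_since=1436094288171',
    '/v1/buckets/default/collections/tasks/records',
    '4b9b0c2f2c3a4a5e8f0a1b2c3d4e5f60',
))
```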
gh_patches_debug_49039
rasdani/github-patches
git_diff
facebookresearch__hydra-279
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] Documentation inconsistency for `utils.get_original_cwd` # 🐛 Bug The tutorial for working directories has a few commands for setting the working directory [see here](https://cli.dev/docs/tutorial/working_directory), but the version of hydra on pip does not have these functions. Additionally, the install instructions do not include instructions on how to install from source (even if that's fairly trivial). The simple solution is to update the wheels on pip. Another alternative would be to put on the installation page that hydra is rapidly developing and suggest that one can install from source directly. ## System information - 0.10.0 from pip - python 3.7 - arch linux ## One more thing... This is very minor but the pip version is `0.10.0` and the github master version is also `0.10.0`, but they not the same as evidenced by this issue. You should probably bump the version of git master. Keep up the good work, I think this is a great idea. </issue> <code> [start of hydra/__init__.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from . import utils 3 from .errors import MissingConfigException 4 from .main import main 5 6 # Source of truth for Hydra's version 7 __version__ = "0.10.0" 8 9 __all__ = ["__version__", "MissingConfigException", "main", "utils"] 10 [end of hydra/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hydra/__init__.py b/hydra/__init__.py --- a/hydra/__init__.py +++ b/hydra/__init__.py @@ -4,6 +4,6 @@ from .main import main # Source of truth for Hydra's version -__version__ = "0.10.0" +__version__ = "0.11.0-pre1" __all__ = ["__version__", "MissingConfigException", "main", "utils"]
{"golden_diff": "diff --git a/hydra/__init__.py b/hydra/__init__.py\n--- a/hydra/__init__.py\n+++ b/hydra/__init__.py\n@@ -4,6 +4,6 @@\n from .main import main\n \n # Source of truth for Hydra's version\n-__version__ = \"0.10.0\"\n+__version__ = \"0.11.0-pre1\"\n \n __all__ = [\"__version__\", \"MissingConfigException\", \"main\", \"utils\"]\n", "issue": "[Bug] Documentation inconsistency for `utils.get_original_cwd`\n# \ud83d\udc1b Bug\r\n\r\nThe tutorial for working directories has a few commands for setting the working directory [see here](https://cli.dev/docs/tutorial/working_directory), but the version of hydra on pip does not have these functions. Additionally, the install instructions do not include instructions on how to install from source (even if that's fairly trivial). The simple solution is to update the wheels on pip. Another alternative would be to put on the installation page that hydra is rapidly developing and suggest that one can install from source directly.\r\n\r\n## System information\r\n- 0.10.0 from pip\r\n- python 3.7\r\n- arch linux\r\n\r\n## One more thing...\r\nThis is very minor but the pip version is `0.10.0` and the github master version is also `0.10.0`, but they not the same as evidenced by this issue. You should probably bump the version of git master. Keep up the good work, I think this is a great idea.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom . import utils\nfrom .errors import MissingConfigException\nfrom .main import main\n\n# Source of truth for Hydra's version\n__version__ = \"0.10.0\"\n\n__all__ = [\"__version__\", \"MissingConfigException\", \"main\", \"utils\"]\n", "path": "hydra/__init__.py"}]}
852
115
gh_patches_debug_34543
rasdani/github-patches
git_diff
UTNkar__moore-154
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Menu Translations <!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] --> ### Description Not all menu pages are using `translated_title` when being added to the menu. <!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label --> </issue> <code> [start of website/website/templatetags/site_tags.py] 1 from django import template 2 3 register = template.Library() 4 5 6 @register.simple_tag(takes_context=True) 7 def get_site_root(context): 8 # NB this returns a core.Page, not the implementation-specific model used 9 # so object-comparison to self will return false as objects would differ 10 return context['request'].site.root_page 11 12 13 def has_menu_children(page): 14 return page.get_children().live().in_menu().exists() 15 16 17 # Retrieves the top menu items - the immediate children of the parent page 18 # The has_menu_children method is necessary because the bootstrap menu requires 19 # a dropdown class to be applied to a parent 20 @register.inclusion_tag('tags/menu.html', takes_context=True) 21 def menu_items(context, parent, calling_page=None, sidenav=False): 22 menuitems = parent.get_children().live().in_menu() 23 for menuitem in menuitems: 24 menuitem.show_dropdown = has_menu_children(menuitem) 25 # TODO: There has to be a better alternative! 26 if hasattr(menuitem, 'googleformindex'): 27 menuitem.translated_title = menuitem.googleformindex\ 28 .translated_title 29 elif hasattr(menuitem, 'googleformpage'): 30 menuitem.translated_title = menuitem.googleformpage\ 31 .translated_title 32 elif hasattr(menuitem, 'homepage'): 33 menuitem.translated_title = menuitem.homepage.translated_title 34 elif hasattr(menuitem, 'recruitmentpage'): 35 menuitem.translated_title = menuitem.recruitmentpage\ 36 .translated_title 37 elif hasattr(menuitem, 'newsindexpage'): 38 menuitem.translated_title = menuitem.newsindexpage.translated_title 39 elif hasattr(menuitem, 'newspage'): 40 menuitem.translated_title = menuitem.newspage.translated_title 41 elif hasattr(menuitem, 'webpage'): 42 menuitem.translated_title = menuitem.webpage.translated_title 43 # We don't directly check if calling_page is None since the template 44 # engine can pass an empty string to calling_page 45 # if the variable passed as calling_page does not exist. 46 menuitem.active = (calling_page.url.startswith(menuitem.url) 47 if calling_page else False) 48 return { 49 'calling_page': calling_page, 50 'menuitems': menuitems, 51 'sidenav': sidenav, 52 # required by the pageurl tag that we want to use within this template 53 'request': context['request'], 54 } 55 56 57 # Retrieves the children of the top menu items for the drop downs 58 @register.inclusion_tag('tags/menu_children.html', takes_context=True) 59 def menu_children(context, parent, sidenav=False): 60 children = parent.get_children() 61 children = children.live().in_menu() 62 return { 63 'parent': parent, 64 'children': children, 65 'sidenav': sidenav, 66 # required by the pageurl tag that we want to use within this template 67 'request': context['request'], 68 } 69 [end of website/website/templatetags/site_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/website/templatetags/site_tags.py b/website/website/templatetags/site_tags.py --- a/website/website/templatetags/site_tags.py +++ b/website/website/templatetags/site_tags.py @@ -20,26 +20,9 @@ @register.inclusion_tag('tags/menu.html', takes_context=True) def menu_items(context, parent, calling_page=None, sidenav=False): menuitems = parent.get_children().live().in_menu() + menuitems = [m.specific for m in menuitems] for menuitem in menuitems: menuitem.show_dropdown = has_menu_children(menuitem) - # TODO: There has to be a better alternative! - if hasattr(menuitem, 'googleformindex'): - menuitem.translated_title = menuitem.googleformindex\ - .translated_title - elif hasattr(menuitem, 'googleformpage'): - menuitem.translated_title = menuitem.googleformpage\ - .translated_title - elif hasattr(menuitem, 'homepage'): - menuitem.translated_title = menuitem.homepage.translated_title - elif hasattr(menuitem, 'recruitmentpage'): - menuitem.translated_title = menuitem.recruitmentpage\ - .translated_title - elif hasattr(menuitem, 'newsindexpage'): - menuitem.translated_title = menuitem.newsindexpage.translated_title - elif hasattr(menuitem, 'newspage'): - menuitem.translated_title = menuitem.newspage.translated_title - elif hasattr(menuitem, 'webpage'): - menuitem.translated_title = menuitem.webpage.translated_title # We don't directly check if calling_page is None since the template # engine can pass an empty string to calling_page # if the variable passed as calling_page does not exist. @@ -57,8 +40,8 @@ # Retrieves the children of the top menu items for the drop downs @register.inclusion_tag('tags/menu_children.html', takes_context=True) def menu_children(context, parent, sidenav=False): - children = parent.get_children() - children = children.live().in_menu() + children = parent.get_children().live().in_menu() + children = [c.specific for c in children] return { 'parent': parent, 'children': children,
{"golden_diff": "diff --git a/website/website/templatetags/site_tags.py b/website/website/templatetags/site_tags.py\n--- a/website/website/templatetags/site_tags.py\n+++ b/website/website/templatetags/site_tags.py\n@@ -20,26 +20,9 @@\n @register.inclusion_tag('tags/menu.html', takes_context=True)\n def menu_items(context, parent, calling_page=None, sidenav=False):\n menuitems = parent.get_children().live().in_menu()\n+ menuitems = [m.specific for m in menuitems]\n for menuitem in menuitems:\n menuitem.show_dropdown = has_menu_children(menuitem)\n- # TODO: There has to be a better alternative!\n- if hasattr(menuitem, 'googleformindex'):\n- menuitem.translated_title = menuitem.googleformindex\\\n- .translated_title\n- elif hasattr(menuitem, 'googleformpage'):\n- menuitem.translated_title = menuitem.googleformpage\\\n- .translated_title\n- elif hasattr(menuitem, 'homepage'):\n- menuitem.translated_title = menuitem.homepage.translated_title\n- elif hasattr(menuitem, 'recruitmentpage'):\n- menuitem.translated_title = menuitem.recruitmentpage\\\n- .translated_title\n- elif hasattr(menuitem, 'newsindexpage'):\n- menuitem.translated_title = menuitem.newsindexpage.translated_title\n- elif hasattr(menuitem, 'newspage'):\n- menuitem.translated_title = menuitem.newspage.translated_title\n- elif hasattr(menuitem, 'webpage'):\n- menuitem.translated_title = menuitem.webpage.translated_title\n # We don't directly check if calling_page is None since the template\n # engine can pass an empty string to calling_page\n # if the variable passed as calling_page does not exist.\n@@ -57,8 +40,8 @@\n # Retrieves the children of the top menu items for the drop downs\n @register.inclusion_tag('tags/menu_children.html', takes_context=True)\n def menu_children(context, parent, sidenav=False):\n- children = parent.get_children()\n- children = children.live().in_menu()\n+ children = parent.get_children().live().in_menu()\n+ children = [c.specific for c in children]\n return {\n 'parent': parent,\n 'children': children,\n", "issue": "Menu Translations\n<!-- Do you want to ask a question? Are you looking for support? 
The system administrator can help you: [email protected] -->\r\n\r\n### Description\r\n\r\nNot all menu pages are using `translated_title` when being added to the menu.\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef get_site_root(context):\n # NB this returns a core.Page, not the implementation-specific model used\n # so object-comparison to self will return false as objects would differ\n return context['request'].site.root_page\n\n\ndef has_menu_children(page):\n return page.get_children().live().in_menu().exists()\n\n\n# Retrieves the top menu items - the immediate children of the parent page\n# The has_menu_children method is necessary because the bootstrap menu requires\n# a dropdown class to be applied to a parent\[email protected]_tag('tags/menu.html', takes_context=True)\ndef menu_items(context, parent, calling_page=None, sidenav=False):\n menuitems = parent.get_children().live().in_menu()\n for menuitem in menuitems:\n menuitem.show_dropdown = has_menu_children(menuitem)\n # TODO: There has to be a better alternative!\n if hasattr(menuitem, 'googleformindex'):\n menuitem.translated_title = menuitem.googleformindex\\\n .translated_title\n elif hasattr(menuitem, 'googleformpage'):\n menuitem.translated_title = menuitem.googleformpage\\\n .translated_title\n elif hasattr(menuitem, 'homepage'):\n menuitem.translated_title = menuitem.homepage.translated_title\n elif hasattr(menuitem, 'recruitmentpage'):\n menuitem.translated_title = menuitem.recruitmentpage\\\n .translated_title\n elif hasattr(menuitem, 'newsindexpage'):\n menuitem.translated_title = menuitem.newsindexpage.translated_title\n elif hasattr(menuitem, 'newspage'):\n menuitem.translated_title = menuitem.newspage.translated_title\n elif hasattr(menuitem, 'webpage'):\n menuitem.translated_title = menuitem.webpage.translated_title\n # We don't directly check if calling_page is None since the template\n # engine can pass an empty string to calling_page\n # if the variable passed as calling_page does not exist.\n menuitem.active = (calling_page.url.startswith(menuitem.url)\n if calling_page else False)\n return {\n 'calling_page': calling_page,\n 'menuitems': menuitems,\n 'sidenav': sidenav,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Retrieves the children of the top menu items for the drop downs\[email protected]_tag('tags/menu_children.html', takes_context=True)\ndef menu_children(context, parent, sidenav=False):\n children = parent.get_children()\n children = children.live().in_menu()\n return {\n 'parent': parent,\n 'children': children,\n 'sidenav': sidenav,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n", "path": "website/website/templatetags/site_tags.py"}]}
1,370
523
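The UTNkar__moore-154 record swaps a long `hasattr` chain for Wagtail's `Page.specific`, so every page subclass exposes its own `translated_title` without the template tag having to enumerate page types. Below is a minimal sketch of the patched `menu_items` tag based on the record's golden diff; it assumes the Django/Wagtail project context of the original file and leaves out the sibling `menu_children` tag.

```python
from django import template

register = template.Library()


def has_menu_children(page):
    return page.get_children().live().in_menu().exists()


@register.inclusion_tag('tags/menu.html', takes_context=True)
def menu_items(context, parent, calling_page=None, sidenav=False):
    menuitems = parent.get_children().live().in_menu()
    # Page.specific downcasts each core Page to its concrete subclass,
    # so translated_title resolves for every page type automatically.
    menuitems = [m.specific for m in menuitems]
    for menuitem in menuitems:
        menuitem.show_dropdown = has_menu_children(menuitem)
        menuitem.active = (calling_page.url.startswith(menuitem.url)
                           if calling_page else False)
    return {
        'calling_page': calling_page,
        'menuitems': menuitems,
        'sidenav': sidenav,
        'request': context['request'],
    }
```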
gh_patches_debug_8815
rasdani/github-patches
git_diff
CTFd__CTFd-2458
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upload to S3 Failing - CTFd Version/Commit: 3.6.1 - Operating System: Linux (Docker container) - Web Browser and Version: Chrome **What happened?** Upgrading CTFd resulting in S3 file uploads beginning to return 400 (bad request) status codes. I see one of the fixes for 3.6.1 was for S3, so perhaps a new bug was introduced. Here are some additional facts which may be helpful: - The files are successfully making there way into S3, despite the error - The timezone I have configured for my server is CST I can also confirm that my deployment had working file upload before upgrade to version 3.6.1 (file upload was working for 3.6.0). **What did you expect to happen?** File upload to continue working. **How to reproduce your issue** Deploy CTFd free version using version 3.6.1 with S3 file upload configured. **Any associated stack traces or error logs** The browser request returns error (400 status code): ``` { "success": false, "errors": { "location": [ "I/O operation on closed file." ] } } ``` The backend error is: ``` [ERROR] Error handling request Traceback (most recent call last): File "/opt/venv/lib/python3.9/site-packages/gunicorn/workers/base_async.py", line 113, in handle_request resp.write_file(respiter) File "/opt/venv/lib/python3.9/site-packages/gunicorn/http/wsgi.py", line 385, in write_file if not self.sendfile(respiter): File "/opt/venv/lib/python3.9/site-packages/gunicorn/http/wsgi.py", line 375, in sendfile self.sock.sendfile(respiter.filelike, count=nbytes) File "/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py", line 486, in sendfile return self._sendfile_use_send(file, offset, count) File "/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py", line 416, in _sendfile_use_send self._check_sendfile_params(file, offset, count) File "/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py", line 461, in _check_sendfile_params raise ValueError( ValueError: count must be a positive integer (got 0) ``` </issue> <code> [start of CTFd/utils/uploads/__init__.py] 1 import hashlib 2 import shutil 3 from pathlib import Path 4 5 from CTFd.models import ChallengeFiles, Files, PageFiles, db 6 from CTFd.utils import get_app_config 7 from CTFd.utils.uploads.uploaders import FilesystemUploader, S3Uploader 8 9 UPLOADERS = {"filesystem": FilesystemUploader, "s3": S3Uploader} 10 11 12 def get_uploader(): 13 return UPLOADERS.get(get_app_config("UPLOAD_PROVIDER") or "filesystem")() 14 15 16 def upload_file(*args, **kwargs): 17 file_obj = kwargs.get("file") 18 challenge_id = kwargs.get("challenge_id") or kwargs.get("challenge") 19 page_id = kwargs.get("page_id") or kwargs.get("page") 20 file_type = kwargs.get("type", "standard") 21 location = kwargs.get("location") 22 23 # Validate location and default filename to uploaded file's name 24 parent = None 25 filename = file_obj.filename 26 if location: 27 path = Path(location) 28 if len(path.parts) != 2: 29 raise ValueError( 30 "Location must contain two parts, a directory and a filename" 31 ) 32 # Allow location to override the directory and filename 33 parent = path.parts[0] 34 filename = path.parts[1] 35 location = parent + "/" + filename 36 37 model_args = {"type": file_type, "location": location} 38 39 model = Files 40 if file_type == "challenge": 41 model = ChallengeFiles 42 model_args["challenge_id"] = challenge_id 43 if file_type == "page": 44 model = PageFiles 45 model_args["page_id"] = page_id 46 47 uploader = get_uploader() 
48 location = uploader.upload(file_obj=file_obj, filename=filename, path=parent) 49 50 sha1sum = hash_file(fp=file_obj) 51 52 model_args["location"] = location 53 model_args["sha1sum"] = sha1sum 54 55 existing_file = Files.query.filter_by(location=location).first() 56 if existing_file: 57 for k, v in model_args.items(): 58 setattr(existing_file, k, v) 59 db.session.commit() 60 file_row = existing_file 61 else: 62 file_row = model(**model_args) 63 db.session.add(file_row) 64 db.session.commit() 65 return file_row 66 67 68 def hash_file(fp, algo="sha1"): 69 fp.seek(0) 70 if algo == "sha1": 71 h = hashlib.sha1() # nosec 72 # https://stackoverflow.com/a/64730457 73 while chunk := fp.read(1024): 74 h.update(chunk) 75 fp.seek(0) 76 return h.hexdigest() 77 else: 78 raise NotImplementedError 79 80 81 def delete_file(file_id): 82 f = Files.query.filter_by(id=file_id).first_or_404() 83 84 uploader = get_uploader() 85 uploader.delete(filename=f.location) 86 87 db.session.delete(f) 88 db.session.commit() 89 return True 90 91 92 def rmdir(directory): 93 shutil.rmtree(directory, ignore_errors=True) 94 [end of CTFd/utils/uploads/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/utils/uploads/__init__.py b/CTFd/utils/uploads/__init__.py --- a/CTFd/utils/uploads/__init__.py +++ b/CTFd/utils/uploads/__init__.py @@ -44,11 +44,12 @@ model = PageFiles model_args["page_id"] = page_id + # Hash is calculated before upload since S3 file upload closes file object + sha1sum = hash_file(fp=file_obj) + uploader = get_uploader() location = uploader.upload(file_obj=file_obj, filename=filename, path=parent) - sha1sum = hash_file(fp=file_obj) - model_args["location"] = location model_args["sha1sum"] = sha1sum
{"golden_diff": "diff --git a/CTFd/utils/uploads/__init__.py b/CTFd/utils/uploads/__init__.py\n--- a/CTFd/utils/uploads/__init__.py\n+++ b/CTFd/utils/uploads/__init__.py\n@@ -44,11 +44,12 @@\n model = PageFiles\n model_args[\"page_id\"] = page_id\n \n+ # Hash is calculated before upload since S3 file upload closes file object\n+ sha1sum = hash_file(fp=file_obj)\n+\n uploader = get_uploader()\n location = uploader.upload(file_obj=file_obj, filename=filename, path=parent)\n \n- sha1sum = hash_file(fp=file_obj)\n-\n model_args[\"location\"] = location\n model_args[\"sha1sum\"] = sha1sum\n", "issue": "Upload to S3 Failing\n- CTFd Version/Commit: 3.6.1\r\n- Operating System: Linux (Docker container)\r\n- Web Browser and Version: Chrome\r\n\r\n**What happened?**\r\n\r\nUpgrading CTFd resulting in S3 file uploads beginning to return 400 (bad request) status codes. I see one of the fixes for 3.6.1 was for S3, so perhaps a new bug was introduced.\r\n\r\nHere are some additional facts which may be helpful:\r\n\r\n - The files are successfully making there way into S3, despite the error\r\n - The timezone I have configured for my server is CST\r\n\r\nI can also confirm that my deployment had working file upload before upgrade to version 3.6.1 (file upload was working for 3.6.0).\r\n\r\n**What did you expect to happen?**\r\n\r\nFile upload to continue working.\r\n\r\n**How to reproduce your issue**\r\n\r\nDeploy CTFd free version using version 3.6.1 with S3 file upload configured.\r\n\r\n**Any associated stack traces or error logs**\r\n\r\nThe browser request returns error (400 status code):\r\n\r\n```\r\n{\r\n \"success\": false,\r\n \"errors\": {\r\n \"location\": [\r\n \"I/O operation on closed file.\"\r\n ]\r\n }\r\n}\r\n```\r\n\r\nThe backend error is:\r\n\r\n```\r\n[ERROR] Error handling request\r\nTraceback (most recent call last):\r\nFile \"/opt/venv/lib/python3.9/site-packages/gunicorn/workers/base_async.py\", line 113, in handle_request\r\nresp.write_file(respiter)\r\nFile \"/opt/venv/lib/python3.9/site-packages/gunicorn/http/wsgi.py\", line 385, in write_file\r\nif not self.sendfile(respiter):\r\nFile \"/opt/venv/lib/python3.9/site-packages/gunicorn/http/wsgi.py\", line 375, in sendfile\r\nself.sock.sendfile(respiter.filelike, count=nbytes)\r\nFile \"/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py\", line 486, in sendfile\r\nreturn self._sendfile_use_send(file, offset, count)\r\nFile \"/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py\", line 416, in _sendfile_use_send\r\nself._check_sendfile_params(file, offset, count)\r\nFile \"/opt/venv/lib/python3.9/site-packages/gevent/_socket3.py\", line 461, in _check_sendfile_params\r\nraise ValueError(\r\nValueError: count must be a positive integer (got 0)\r\n```\n", "before_files": [{"content": "import hashlib\nimport shutil\nfrom pathlib import Path\n\nfrom CTFd.models import ChallengeFiles, Files, PageFiles, db\nfrom CTFd.utils import get_app_config\nfrom CTFd.utils.uploads.uploaders import FilesystemUploader, S3Uploader\n\nUPLOADERS = {\"filesystem\": FilesystemUploader, \"s3\": S3Uploader}\n\n\ndef get_uploader():\n return UPLOADERS.get(get_app_config(\"UPLOAD_PROVIDER\") or \"filesystem\")()\n\n\ndef upload_file(*args, **kwargs):\n file_obj = kwargs.get(\"file\")\n challenge_id = kwargs.get(\"challenge_id\") or kwargs.get(\"challenge\")\n page_id = kwargs.get(\"page_id\") or kwargs.get(\"page\")\n file_type = kwargs.get(\"type\", \"standard\")\n location = kwargs.get(\"location\")\n\n # Validate location and default filename 
to uploaded file's name\n parent = None\n filename = file_obj.filename\n if location:\n path = Path(location)\n if len(path.parts) != 2:\n raise ValueError(\n \"Location must contain two parts, a directory and a filename\"\n )\n # Allow location to override the directory and filename\n parent = path.parts[0]\n filename = path.parts[1]\n location = parent + \"/\" + filename\n\n model_args = {\"type\": file_type, \"location\": location}\n\n model = Files\n if file_type == \"challenge\":\n model = ChallengeFiles\n model_args[\"challenge_id\"] = challenge_id\n if file_type == \"page\":\n model = PageFiles\n model_args[\"page_id\"] = page_id\n\n uploader = get_uploader()\n location = uploader.upload(file_obj=file_obj, filename=filename, path=parent)\n\n sha1sum = hash_file(fp=file_obj)\n\n model_args[\"location\"] = location\n model_args[\"sha1sum\"] = sha1sum\n\n existing_file = Files.query.filter_by(location=location).first()\n if existing_file:\n for k, v in model_args.items():\n setattr(existing_file, k, v)\n db.session.commit()\n file_row = existing_file\n else:\n file_row = model(**model_args)\n db.session.add(file_row)\n db.session.commit()\n return file_row\n\n\ndef hash_file(fp, algo=\"sha1\"):\n fp.seek(0)\n if algo == \"sha1\":\n h = hashlib.sha1() # nosec\n # https://stackoverflow.com/a/64730457\n while chunk := fp.read(1024):\n h.update(chunk)\n fp.seek(0)\n return h.hexdigest()\n else:\n raise NotImplementedError\n\n\ndef delete_file(file_id):\n f = Files.query.filter_by(id=file_id).first_or_404()\n\n uploader = get_uploader()\n uploader.delete(filename=f.location)\n\n db.session.delete(f)\n db.session.commit()\n return True\n\n\ndef rmdir(directory):\n shutil.rmtree(directory, ignore_errors=True)\n", "path": "CTFd/utils/uploads/__init__.py"}]}
1,927
171
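The CTFd__CTFd-2458 record is an ordering bug: the S3 uploader closes the incoming file object, so hashing it after the upload fails with "I/O operation on closed file", and the golden diff simply computes the SHA-1 first. The self-contained demo below reproduces that pattern; `DummyUploader` is the editor's stand-in for the real S3 uploader and exists only to show why the order matters.

```python
import hashlib
import io


class DummyUploader:
    """Stand-in for an uploader that closes the stream, as the S3 one does."""

    def upload(self, file_obj, filename):
        file_obj.read()
        file_obj.close()          # after this, any further read/seek fails
        return "uploads/" + filename


def hash_file(fp):
    # Same chunked SHA-1 as CTFd's hash_file helper.
    fp.seek(0)
    h = hashlib.sha1()  # nosec
    while chunk := fp.read(1024):
        h.update(chunk)
    fp.seek(0)
    return h.hexdigest()


file_obj = io.BytesIO(b"flag{example}")

# Hash first: once upload() has run, the object is closed and unusable.
sha1sum = hash_file(file_obj)
location = DummyUploader().upload(file_obj, "challenge.txt")
print(sha1sum, location)
```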
gh_patches_debug_29333
rasdani/github-patches
git_diff
pex-tool__pex-322
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove pkg_resources.build_zipmanifest monkeypatching This may involve increasing the minimum setuptools version. Another alternative is vendoring setuptools. </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '1.1.15' 5 6 SETUPTOOLS_REQUIREMENT = 'setuptools>=2.2,<20.11' 7 WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.30.0' 8 [end of pex/version.py] [start of pex/pex_bootstrapper.py] 1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 import contextlib 5 import os 6 import sys 7 import zipfile 8 9 __all__ = ('bootstrap_pex',) 10 11 12 def pex_info_name(entry_point): 13 """Return the PEX-INFO for an entry_point""" 14 return os.path.join(entry_point, 'PEX-INFO') 15 16 17 def is_compressed(entry_point): 18 return os.path.exists(entry_point) and not os.path.exists(pex_info_name(entry_point)) 19 20 21 def read_pexinfo_from_directory(entry_point): 22 with open(pex_info_name(entry_point), 'rb') as fp: 23 return fp.read() 24 25 26 def read_pexinfo_from_zip(entry_point): 27 with contextlib.closing(zipfile.ZipFile(entry_point)) as zf: 28 return zf.read('PEX-INFO') 29 30 31 def read_pex_info_content(entry_point): 32 """Return the raw content of a PEX-INFO.""" 33 if is_compressed(entry_point): 34 return read_pexinfo_from_zip(entry_point) 35 else: 36 return read_pexinfo_from_directory(entry_point) 37 38 39 def get_pex_info(entry_point): 40 """Return the PexInfo object for an entry point.""" 41 from . import pex_info 42 43 pex_info_content = read_pex_info_content(entry_point) 44 if pex_info_content: 45 return pex_info.PexInfo.from_json(pex_info_content) 46 raise ValueError('Invalid entry_point: %s' % entry_point) 47 48 49 # TODO(wickman) Remove once resolved (#91): 50 # https://bitbucket.org/pypa/setuptools/issue/154/build_zipmanifest-results-should-be 51 def monkeypatch_build_zipmanifest(): 52 import pkg_resources 53 if not hasattr(pkg_resources, 'build_zipmanifest'): 54 return 55 old_build_zipmanifest = pkg_resources.build_zipmanifest 56 def memoized_build_zipmanifest(archive, memo={}): 57 if archive not in memo: 58 memo[archive] = old_build_zipmanifest(archive) 59 return memo[archive] 60 pkg_resources.build_zipmanifest = memoized_build_zipmanifest 61 62 63 def find_in_path(target_interpreter): 64 if os.path.exists(target_interpreter): 65 return target_interpreter 66 67 for directory in os.getenv('PATH', '').split(os.pathsep): 68 try_path = os.path.join(directory, target_interpreter) 69 if os.path.exists(try_path): 70 return try_path 71 72 73 def maybe_reexec_pex(): 74 from .variables import ENV 75 if not ENV.PEX_PYTHON: 76 return 77 78 from .common import die 79 from .tracer import TRACER 80 81 target_python = ENV.PEX_PYTHON 82 target = find_in_path(target_python) 83 if not target: 84 die('Failed to find interpreter specified by PEX_PYTHON: %s' % target) 85 if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable): 86 TRACER.log('Detected PEX_PYTHON, re-exec to %s' % target) 87 ENV.delete('PEX_PYTHON') 88 os.execve(target, [target_python] + sys.argv, ENV.copy()) 89 90 91 def bootstrap_pex(entry_point): 92 from .finders import register_finders 93 monkeypatch_build_zipmanifest() 94 register_finders() 95 maybe_reexec_pex() 96 97 from . 
import pex 98 pex.PEX(entry_point).execute() 99 100 101 def bootstrap_pex_env(entry_point): 102 """Bootstrap the current runtime environment using a given pex.""" 103 from .environment import PEXEnvironment 104 from .finders import register_finders 105 from .pex_info import PexInfo 106 107 monkeypatch_build_zipmanifest() 108 register_finders() 109 110 PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate() 111 [end of pex/pex_bootstrapper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py --- a/pex/pex_bootstrapper.py +++ b/pex/pex_bootstrapper.py @@ -46,20 +46,6 @@ raise ValueError('Invalid entry_point: %s' % entry_point) -# TODO(wickman) Remove once resolved (#91): -# https://bitbucket.org/pypa/setuptools/issue/154/build_zipmanifest-results-should-be -def monkeypatch_build_zipmanifest(): - import pkg_resources - if not hasattr(pkg_resources, 'build_zipmanifest'): - return - old_build_zipmanifest = pkg_resources.build_zipmanifest - def memoized_build_zipmanifest(archive, memo={}): - if archive not in memo: - memo[archive] = old_build_zipmanifest(archive) - return memo[archive] - pkg_resources.build_zipmanifest = memoized_build_zipmanifest - - def find_in_path(target_interpreter): if os.path.exists(target_interpreter): return target_interpreter @@ -90,7 +76,6 @@ def bootstrap_pex(entry_point): from .finders import register_finders - monkeypatch_build_zipmanifest() register_finders() maybe_reexec_pex() @@ -104,7 +89,6 @@ from .finders import register_finders from .pex_info import PexInfo - monkeypatch_build_zipmanifest() register_finders() PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate() diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -3,5 +3,5 @@ __version__ = '1.1.15' -SETUPTOOLS_REQUIREMENT = 'setuptools>=2.2,<20.11' +SETUPTOOLS_REQUIREMENT = 'setuptools>=5.7,<20.11' WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.30.0'
{"golden_diff": "diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py\n--- a/pex/pex_bootstrapper.py\n+++ b/pex/pex_bootstrapper.py\n@@ -46,20 +46,6 @@\n raise ValueError('Invalid entry_point: %s' % entry_point)\n \n \n-# TODO(wickman) Remove once resolved (#91):\n-# https://bitbucket.org/pypa/setuptools/issue/154/build_zipmanifest-results-should-be\n-def monkeypatch_build_zipmanifest():\n- import pkg_resources\n- if not hasattr(pkg_resources, 'build_zipmanifest'):\n- return\n- old_build_zipmanifest = pkg_resources.build_zipmanifest\n- def memoized_build_zipmanifest(archive, memo={}):\n- if archive not in memo:\n- memo[archive] = old_build_zipmanifest(archive)\n- return memo[archive]\n- pkg_resources.build_zipmanifest = memoized_build_zipmanifest\n-\n-\n def find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n@@ -90,7 +76,6 @@\n \n def bootstrap_pex(entry_point):\n from .finders import register_finders\n- monkeypatch_build_zipmanifest()\n register_finders()\n maybe_reexec_pex()\n \n@@ -104,7 +89,6 @@\n from .finders import register_finders\n from .pex_info import PexInfo\n \n- monkeypatch_build_zipmanifest()\n register_finders()\n \n PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()\ndiff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -3,5 +3,5 @@\n \n __version__ = '1.1.15'\n \n-SETUPTOOLS_REQUIREMENT = 'setuptools>=2.2,<20.11'\n+SETUPTOOLS_REQUIREMENT = 'setuptools>=5.7,<20.11'\n WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.30.0'\n", "issue": "Remove pkg_resources.build_zipmanifest monkeypatching\nThis may involve increasing the minimum setuptools version. Another alternative is vendoring setuptools.\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.1.15'\n\nSETUPTOOLS_REQUIREMENT = 'setuptools>=2.2,<20.11'\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.30.0'\n", "path": "pex/version.py"}, {"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport contextlib\nimport os\nimport sys\nimport zipfile\n\n__all__ = ('bootstrap_pex',)\n\n\ndef pex_info_name(entry_point):\n \"\"\"Return the PEX-INFO for an entry_point\"\"\"\n return os.path.join(entry_point, 'PEX-INFO')\n\n\ndef is_compressed(entry_point):\n return os.path.exists(entry_point) and not os.path.exists(pex_info_name(entry_point))\n\n\ndef read_pexinfo_from_directory(entry_point):\n with open(pex_info_name(entry_point), 'rb') as fp:\n return fp.read()\n\n\ndef read_pexinfo_from_zip(entry_point):\n with contextlib.closing(zipfile.ZipFile(entry_point)) as zf:\n return zf.read('PEX-INFO')\n\n\ndef read_pex_info_content(entry_point):\n \"\"\"Return the raw content of a PEX-INFO.\"\"\"\n if is_compressed(entry_point):\n return read_pexinfo_from_zip(entry_point)\n else:\n return read_pexinfo_from_directory(entry_point)\n\n\ndef get_pex_info(entry_point):\n \"\"\"Return the PexInfo object for an entry point.\"\"\"\n from . 
import pex_info\n\n pex_info_content = read_pex_info_content(entry_point)\n if pex_info_content:\n return pex_info.PexInfo.from_json(pex_info_content)\n raise ValueError('Invalid entry_point: %s' % entry_point)\n\n\n# TODO(wickman) Remove once resolved (#91):\n# https://bitbucket.org/pypa/setuptools/issue/154/build_zipmanifest-results-should-be\ndef monkeypatch_build_zipmanifest():\n import pkg_resources\n if not hasattr(pkg_resources, 'build_zipmanifest'):\n return\n old_build_zipmanifest = pkg_resources.build_zipmanifest\n def memoized_build_zipmanifest(archive, memo={}):\n if archive not in memo:\n memo[archive] = old_build_zipmanifest(archive)\n return memo[archive]\n pkg_resources.build_zipmanifest = memoized_build_zipmanifest\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef maybe_reexec_pex():\n from .variables import ENV\n if not ENV.PEX_PYTHON:\n return\n\n from .common import die\n from .tracer import TRACER\n\n target_python = ENV.PEX_PYTHON\n target = find_in_path(target_python)\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable):\n TRACER.log('Detected PEX_PYTHON, re-exec to %s' % target)\n ENV.delete('PEX_PYTHON')\n os.execve(target, [target_python] + sys.argv, ENV.copy())\n\n\ndef bootstrap_pex(entry_point):\n from .finders import register_finders\n monkeypatch_build_zipmanifest()\n register_finders()\n maybe_reexec_pex()\n\n from . import pex\n pex.PEX(entry_point).execute()\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n from .environment import PEXEnvironment\n from .finders import register_finders\n from .pex_info import PexInfo\n\n monkeypatch_build_zipmanifest()\n register_finders()\n\n PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()\n", "path": "pex/pex_bootstrapper.py"}]}
1,738
477
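The pex-tool__pex-322 record deletes the `build_zipmanifest` monkeypatch and, in the same change, raises the minimum setuptools version. The short sketch below shows the resulting pins from `pex/version.py`; the reading that setuptools 5.7 is the floor at which the workaround becomes unnecessary is the editor's inference from the diff, not a statement from the record.

```python
# pex/version.py after the patch: only the setuptools floor moves.
__version__ = '1.1.15'

SETUPTOOLS_REQUIREMENT = 'setuptools>=5.7,<20.11'   # was >=2.2
WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.30.0'
```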
gh_patches_debug_18001
rasdani/github-patches
git_diff
mozilla__telemetry-analysis-service-258
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ATMO should pre-click my single SSH key Would save me thousands of milliseconds every time I launch a cluster ;) </issue> <code> [start of atmo/clusters/views.py] 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 from django.contrib import messages 5 from django.contrib.auth.decorators import login_required 6 from django.shortcuts import redirect, render 7 from django.utils.safestring import mark_safe 8 9 from allauth.account.utils import user_display 10 11 from .forms import NewClusterForm 12 from .models import Cluster 13 from ..decorators import view_permission_required, delete_permission_required 14 15 16 @login_required 17 def new_cluster(request): 18 if request.user.created_sshkeys.count() == 0: 19 messages.error( 20 request, 21 mark_safe( 22 '<h4>No SSH keys associated to you.</h4>' 23 'Please upload one below to be able to launch a cluster.' 24 'This is one-time step.' 25 ) 26 ) 27 return redirect('keys-new') 28 initial = { 29 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)), 30 'size': 1, 31 } 32 form = NewClusterForm( 33 request.user, 34 initial=initial, 35 ) 36 if request.method == 'POST': 37 form = NewClusterForm( 38 request.user, 39 data=request.POST, 40 files=request.FILES, 41 initial=initial, 42 ) 43 if form.is_valid(): 44 cluster = form.save() # this will also magically spawn the cluster for us 45 return redirect(cluster) 46 context = { 47 'form': form, 48 } 49 return render(request, 'atmo/clusters/new.html', context) 50 51 52 @login_required 53 @delete_permission_required(Cluster) 54 def terminate_cluster(request, id): 55 cluster = Cluster.objects.get(id=id) 56 if not cluster.is_active: 57 return redirect(cluster) 58 59 if request.method == 'POST': 60 cluster.deactivate() 61 return redirect(cluster) 62 63 context = { 64 'cluster': cluster, 65 } 66 return render(request, 'atmo/clusters/terminate.html', context=context) 67 68 69 @login_required 70 @view_permission_required(Cluster) 71 def detail_cluster(request, id): 72 cluster = Cluster.objects.get(id=id) 73 context = { 74 'cluster': cluster, 75 } 76 return render(request, 'atmo/clusters/detail.html', context=context) 77 [end of atmo/clusters/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/atmo/clusters/views.py b/atmo/clusters/views.py --- a/atmo/clusters/views.py +++ b/atmo/clusters/views.py @@ -15,7 +15,13 @@ @login_required def new_cluster(request): - if request.user.created_sshkeys.count() == 0: + initial = { + 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)), + 'size': 1, + } + ssh_key_count = request.user.created_sshkeys.count() + + if ssh_key_count == 0: messages.error( request, mark_safe( @@ -25,10 +31,10 @@ ) ) return redirect('keys-new') - initial = { - 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)), - 'size': 1, - } + elif ssh_key_count == 1: + # If only 1 ssh key, make it pre-selected. + initial['ssh_key'] = request.user.created_sshkeys.values('pk')[0]['pk'] + form = NewClusterForm( request.user, initial=initial,
{"golden_diff": "diff --git a/atmo/clusters/views.py b/atmo/clusters/views.py\n--- a/atmo/clusters/views.py\n+++ b/atmo/clusters/views.py\n@@ -15,7 +15,13 @@\n \n @login_required\n def new_cluster(request):\n- if request.user.created_sshkeys.count() == 0:\n+ initial = {\n+ 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)),\n+ 'size': 1,\n+ }\n+ ssh_key_count = request.user.created_sshkeys.count()\n+\n+ if ssh_key_count == 0:\n messages.error(\n request,\n mark_safe(\n@@ -25,10 +31,10 @@\n )\n )\n return redirect('keys-new')\n- initial = {\n- 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)),\n- 'size': 1,\n- }\n+ elif ssh_key_count == 1:\n+ # If only 1 ssh key, make it pre-selected.\n+ initial['ssh_key'] = request.user.created_sshkeys.values('pk')[0]['pk']\n+\n form = NewClusterForm(\n request.user,\n initial=initial,\n", "issue": "ATMO should pre-click my single SSH key\nWould save me thousands of milliseconds every time I launch a cluster ;)\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\nfrom django.utils.safestring import mark_safe\n\nfrom allauth.account.utils import user_display\n\nfrom .forms import NewClusterForm\nfrom .models import Cluster\nfrom ..decorators import view_permission_required, delete_permission_required\n\n\n@login_required\ndef new_cluster(request):\n if request.user.created_sshkeys.count() == 0:\n messages.error(\n request,\n mark_safe(\n '<h4>No SSH keys associated to you.</h4>'\n 'Please upload one below to be able to launch a cluster.'\n 'This is one-time step.'\n )\n )\n return redirect('keys-new')\n initial = {\n 'identifier': '{}-telemetry-analysis'.format(user_display(request.user)),\n 'size': 1,\n }\n form = NewClusterForm(\n request.user,\n initial=initial,\n )\n if request.method == 'POST':\n form = NewClusterForm(\n request.user,\n data=request.POST,\n files=request.FILES,\n initial=initial,\n )\n if form.is_valid():\n cluster = form.save() # this will also magically spawn the cluster for us\n return redirect(cluster)\n context = {\n 'form': form,\n }\n return render(request, 'atmo/clusters/new.html', context)\n\n\n@login_required\n@delete_permission_required(Cluster)\ndef terminate_cluster(request, id):\n cluster = Cluster.objects.get(id=id)\n if not cluster.is_active:\n return redirect(cluster)\n\n if request.method == 'POST':\n cluster.deactivate()\n return redirect(cluster)\n\n context = {\n 'cluster': cluster,\n }\n return render(request, 'atmo/clusters/terminate.html', context=context)\n\n\n@login_required\n@view_permission_required(Cluster)\ndef detail_cluster(request, id):\n cluster = Cluster.objects.get(id=id)\n context = {\n 'cluster': cluster,\n }\n return render(request, 'atmo/clusters/detail.html', context=context)\n", "path": "atmo/clusters/views.py"}]}
1,199
263
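The accepted patch in the record above boils down to a small "pre-select the only available choice" branch in the view's `initial` data. The sketch below restates that branch as a plain function so the three cases (no keys, one key, many keys) can be exercised without Django or the ATMO models; `build_initial` and its arguments are illustrative stand-ins, not the project's actual API.

```python
# Minimal sketch of the "pre-select the only choice" pattern used by the fix above.
# The list of SSH key primary keys and the `initial` dict stand in for the Django
# form machinery in the real view.

def build_initial(username, ssh_key_pks):
    """Build the initial form data for the new-cluster view."""
    initial = {
        "identifier": f"{username}-telemetry-analysis",
        "size": 1,
    }
    # With exactly one key there is nothing to choose, so pre-select it.
    if len(ssh_key_pks) == 1:
        initial["ssh_key"] = ssh_key_pks[0]
    return initial


if __name__ == "__main__":
    assert "ssh_key" not in build_initial("jane", [])        # no keys: the real view redirects to keys-new
    assert build_initial("jane", [42])["ssh_key"] == 42      # single key is pre-clicked
    assert "ssh_key" not in build_initial("jane", [42, 43])  # multiple keys: the user still picks
```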
gh_patches_debug_2290
rasdani/github-patches
git_diff
TheAlgorithms__Python-4779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug with union in disjoint_set https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py ```python def union_set(x, y): """ union two sets. set with bigger rank should be parent, so that the disjoint set tree will be more flat. """ x, y = find_set(x), find_set(y) if x.rank > y.rank: y.parent = x else: x.parent = y if x.rank == y.rank: y.rank += 1 ``` here need check if x==y Bug with union in disjoint_set https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py ```python def union_set(x, y): """ union two sets. set with bigger rank should be parent, so that the disjoint set tree will be more flat. """ x, y = find_set(x), find_set(y) if x.rank > y.rank: y.parent = x else: x.parent = y if x.rank == y.rank: y.rank += 1 ``` here need check if x==y </issue> <code> [start of data_structures/disjoint_set/disjoint_set.py] 1 """ 2 disjoint set 3 Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure 4 """ 5 6 7 class Node: 8 def __init__(self, data): 9 self.data = data 10 11 12 def make_set(x): 13 """ 14 make x as a set. 15 """ 16 # rank is the distance from x to its' parent 17 # root's rank is 0 18 x.rank = 0 19 x.parent = x 20 21 22 def union_set(x, y): 23 """ 24 union two sets. 25 set with bigger rank should be parent, so that the 26 disjoint set tree will be more flat. 27 """ 28 x, y = find_set(x), find_set(y) 29 if x.rank > y.rank: 30 y.parent = x 31 else: 32 x.parent = y 33 if x.rank == y.rank: 34 y.rank += 1 35 36 37 def find_set(x): 38 """ 39 return the parent of x 40 """ 41 if x != x.parent: 42 x.parent = find_set(x.parent) 43 return x.parent 44 45 46 def find_python_set(node: Node) -> set: 47 """ 48 Return a Python Standard Library set that contains i. 49 """ 50 sets = ({0, 1, 2}, {3, 4, 5}) 51 for s in sets: 52 if node.data in s: 53 return s 54 raise ValueError(f"{node.data} is not in {sets}") 55 56 57 def test_disjoint_set(): 58 """ 59 >>> test_disjoint_set() 60 """ 61 vertex = [Node(i) for i in range(6)] 62 for v in vertex: 63 make_set(v) 64 65 union_set(vertex[0], vertex[1]) 66 union_set(vertex[1], vertex[2]) 67 union_set(vertex[3], vertex[4]) 68 union_set(vertex[3], vertex[5]) 69 70 for node0 in vertex: 71 for node1 in vertex: 72 if find_python_set(node0).isdisjoint(find_python_set(node1)): 73 assert find_set(node0) != find_set(node1) 74 else: 75 assert find_set(node0) == find_set(node1) 76 77 78 if __name__ == "__main__": 79 test_disjoint_set() 80 [end of data_structures/disjoint_set/disjoint_set.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -26,7 +26,10 @@ disjoint set tree will be more flat. """ x, y = find_set(x), find_set(y) - if x.rank > y.rank: + if x == y: + return + + elif x.rank > y.rank: y.parent = x else: x.parent = y
{"golden_diff": "diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py\n--- a/data_structures/disjoint_set/disjoint_set.py\n+++ b/data_structures/disjoint_set/disjoint_set.py\n@@ -26,7 +26,10 @@\n disjoint set tree will be more flat.\r\n \"\"\"\r\n x, y = find_set(x), find_set(y)\r\n- if x.rank > y.rank:\r\n+ if x == y:\r\n+ return\r\n+\r\n+ elif x.rank > y.rank:\r\n y.parent = x\r\n else:\r\n x.parent = y\n", "issue": "Bug with union in disjoint_set\nhttps://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py\r\n```python\r\ndef union_set(x, y):\r\n \"\"\"\r\n union two sets.\r\n set with bigger rank should be parent, so that the\r\n disjoint set tree will be more flat.\r\n \"\"\"\r\n x, y = find_set(x), find_set(y)\r\n if x.rank > y.rank:\r\n y.parent = x\r\n else:\r\n x.parent = y\r\n if x.rank == y.rank:\r\n y.rank += 1\r\n```\r\n\r\nhere need check if x==y\r\n\nBug with union in disjoint_set\nhttps://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py\r\n```python\r\ndef union_set(x, y):\r\n \"\"\"\r\n union two sets.\r\n set with bigger rank should be parent, so that the\r\n disjoint set tree will be more flat.\r\n \"\"\"\r\n x, y = find_set(x), find_set(y)\r\n if x.rank > y.rank:\r\n y.parent = x\r\n else:\r\n x.parent = y\r\n if x.rank == y.rank:\r\n y.rank += 1\r\n```\r\n\r\nhere need check if x==y\r\n\n", "before_files": [{"content": "\"\"\"\r\n disjoint set\r\n Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure\r\n\"\"\"\r\n\r\n\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n\r\n\r\ndef make_set(x):\r\n \"\"\"\r\n make x as a set.\r\n \"\"\"\r\n # rank is the distance from x to its' parent\r\n # root's rank is 0\r\n x.rank = 0\r\n x.parent = x\r\n\r\n\r\ndef union_set(x, y):\r\n \"\"\"\r\n union two sets.\r\n set with bigger rank should be parent, so that the\r\n disjoint set tree will be more flat.\r\n \"\"\"\r\n x, y = find_set(x), find_set(y)\r\n if x.rank > y.rank:\r\n y.parent = x\r\n else:\r\n x.parent = y\r\n if x.rank == y.rank:\r\n y.rank += 1\r\n\r\n\r\ndef find_set(x):\r\n \"\"\"\r\n return the parent of x\r\n \"\"\"\r\n if x != x.parent:\r\n x.parent = find_set(x.parent)\r\n return x.parent\r\n\r\n\r\ndef find_python_set(node: Node) -> set:\r\n \"\"\"\r\n Return a Python Standard Library set that contains i.\r\n \"\"\"\r\n sets = ({0, 1, 2}, {3, 4, 5})\r\n for s in sets:\r\n if node.data in s:\r\n return s\r\n raise ValueError(f\"{node.data} is not in {sets}\")\r\n\r\n\r\ndef test_disjoint_set():\r\n \"\"\"\r\n >>> test_disjoint_set()\r\n \"\"\"\r\n vertex = [Node(i) for i in range(6)]\r\n for v in vertex:\r\n make_set(v)\r\n\r\n union_set(vertex[0], vertex[1])\r\n union_set(vertex[1], vertex[2])\r\n union_set(vertex[3], vertex[4])\r\n union_set(vertex[3], vertex[5])\r\n\r\n for node0 in vertex:\r\n for node1 in vertex:\r\n if find_python_set(node0).isdisjoint(find_python_set(node1)):\r\n assert find_set(node0) != find_set(node1)\r\n else:\r\n assert find_set(node0) == find_set(node1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test_disjoint_set()\r\n", "path": "data_structures/disjoint_set/disjoint_set.py"}]}
1,432
136
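The issue and diff in this record hinge on one failure mode: calling `union_set` on two members that already share a root keeps inflating the root's rank. The self-contained script below reproduces the record's tiny disjoint set and contrasts the unguarded union with the patched one; it mirrors the repository code but is a standalone illustration, not the library itself.

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.rank = 0
        self.parent = self


def find_set(x):
    if x != x.parent:
        x.parent = find_set(x.parent)
    return x.parent


def union_buggy(x, y):
    x, y = find_set(x), find_set(y)
    if x.rank > y.rank:
        y.parent = x
    else:
        x.parent = y
        if x.rank == y.rank:
            y.rank += 1          # runs even when x is y, inflating the rank


def union_fixed(x, y):
    x, y = find_set(x), find_set(y)
    if x == y:
        return                   # the guard requested in the issue
    if x.rank > y.rank:
        y.parent = x
    else:
        x.parent = y
        if x.rank == y.rank:
            y.rank += 1


if __name__ == "__main__":
    a, b = Node("a"), Node("b")
    union_buggy(a, b)
    for _ in range(3):
        union_buggy(a, b)        # repeated unions of an already-joined pair
    print(find_set(a).rank)      # 4 -- rank keeps growing although the tree is flat

    c, d = Node("c"), Node("d")
    union_fixed(c, d)
    for _ in range(3):
        union_fixed(c, d)
    print(find_set(c).rank)      # 1 -- rank stays meaningful
```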
gh_patches_debug_15343
rasdani/github-patches
git_diff
Pylons__pyramid-1131
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No way to add query parameters without a value I occasionally need to put a hint in the query string for a URL, which is essentially a parameter without a value. This can be important to provide information to javascript or as a hint to GA. For example I may need to use `http://localhost/dashboard?new-user` as URL when I redirect a new user to the dashboard after completing registration. Intuitively I expected this to work: ``` python return HTTPFound(request.route_url('dashboard', _query={'new-user': None})) ``` but that returns `/dashboard?new-user=None` which is not very pretty. </issue> <code> [start of pyramid/encode.py] 1 from pyramid.compat import ( 2 text_type, 3 binary_type, 4 is_nonstr_iter, 5 url_quote as _url_quote, 6 url_quote_plus as quote_plus, # bw compat api (dnr) 7 ) 8 9 def url_quote(s, safe=''): # bw compat api 10 return _url_quote(s, safe=safe) 11 12 def urlencode(query, doseq=True): 13 """ 14 An alternate implementation of Python's stdlib `urllib.urlencode 15 function <http://docs.python.org/library/urllib.html>`_ which 16 accepts unicode keys and values within the ``query`` 17 dict/sequence; all Unicode keys and values are first converted to 18 UTF-8 before being used to compose the query string. 19 20 The value of ``query`` must be a sequence of two-tuples 21 representing key/value pairs *or* an object (often a dictionary) 22 with an ``.items()`` method that returns a sequence of two-tuples 23 representing key/value pairs. 24 25 For minimal calling convention backwards compatibility, this 26 version of urlencode accepts *but ignores* a second argument 27 conventionally named ``doseq``. The Python stdlib version behaves 28 differently when ``doseq`` is False and when a sequence is 29 presented as one of the values. This version always behaves in 30 the ``doseq=True`` mode, no matter what the value of the second 31 argument. 32 33 See the Python stdlib documentation for ``urllib.urlencode`` for 34 more information. 35 """ 36 try: 37 # presumed to be a dictionary 38 query = query.items() 39 except AttributeError: 40 pass 41 42 result = '' 43 prefix = '' 44 45 for (k, v) in query: 46 k = _enc(k) 47 48 if is_nonstr_iter(v): 49 for x in v: 50 x = _enc(x) 51 result += '%s%s=%s' % (prefix, k, x) 52 prefix = '&' 53 else: 54 v = _enc(v) 55 result += '%s%s=%s' % (prefix, k, v) 56 57 prefix = '&' 58 59 return result 60 61 def _enc(val): 62 cls = val.__class__ 63 if cls is text_type: 64 val = val.encode('utf-8') 65 elif cls is not binary_type: 66 val = str(val).encode('utf-8') 67 return quote_plus(val) 68 69 [end of pyramid/encode.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyramid/encode.py b/pyramid/encode.py --- a/pyramid/encode.py +++ b/pyramid/encode.py @@ -32,6 +32,10 @@ See the Python stdlib documentation for ``urllib.urlencode`` for more information. + + .. versionchanged:: 1.5 + In a key/value pair, if the value is ``None`` then it will be + dropped from the resulting output. """ try: # presumed to be a dictionary @@ -50,6 +54,8 @@ x = _enc(x) result += '%s%s=%s' % (prefix, k, x) prefix = '&' + elif v is None: + result += '%s%s=' % (prefix, k) else: v = _enc(v) result += '%s%s=%s' % (prefix, k, v)
{"golden_diff": "diff --git a/pyramid/encode.py b/pyramid/encode.py\n--- a/pyramid/encode.py\n+++ b/pyramid/encode.py\n@@ -32,6 +32,10 @@\n \n See the Python stdlib documentation for ``urllib.urlencode`` for\n more information.\n+\n+ .. versionchanged:: 1.5\n+ In a key/value pair, if the value is ``None`` then it will be\n+ dropped from the resulting output.\n \"\"\"\n try:\n # presumed to be a dictionary\n@@ -50,6 +54,8 @@\n x = _enc(x)\n result += '%s%s=%s' % (prefix, k, x)\n prefix = '&'\n+ elif v is None:\n+ result += '%s%s=' % (prefix, k)\n else:\n v = _enc(v)\n result += '%s%s=%s' % (prefix, k, v)\n", "issue": "No way to add query parameters without a value\nI occasionally need to put a hint in the query string for a URL, which is essentially a parameter without a value. This can be important to provide information to javascript or as a hint to GA. For example I may need to use `http://localhost/dashboard?new-user` as URL when I redirect a new user to the dashboard after completing registration.\n\nIntuitively I expected this to work:\n\n``` python\nreturn HTTPFound(request.route_url('dashboard', _query={'new-user': None}))\n```\n\nbut that returns `/dashboard?new-user=None` which is not very pretty.\n\n", "before_files": [{"content": "from pyramid.compat import (\n text_type,\n binary_type,\n is_nonstr_iter,\n url_quote as _url_quote,\n url_quote_plus as quote_plus, # bw compat api (dnr)\n )\n\ndef url_quote(s, safe=''): # bw compat api\n return _url_quote(s, safe=safe)\n\ndef urlencode(query, doseq=True):\n \"\"\"\n An alternate implementation of Python's stdlib `urllib.urlencode\n function <http://docs.python.org/library/urllib.html>`_ which\n accepts unicode keys and values within the ``query``\n dict/sequence; all Unicode keys and values are first converted to\n UTF-8 before being used to compose the query string.\n\n The value of ``query`` must be a sequence of two-tuples\n representing key/value pairs *or* an object (often a dictionary)\n with an ``.items()`` method that returns a sequence of two-tuples\n representing key/value pairs.\n\n For minimal calling convention backwards compatibility, this\n version of urlencode accepts *but ignores* a second argument\n conventionally named ``doseq``. The Python stdlib version behaves\n differently when ``doseq`` is False and when a sequence is\n presented as one of the values. This version always behaves in\n the ``doseq=True`` mode, no matter what the value of the second\n argument.\n\n See the Python stdlib documentation for ``urllib.urlencode`` for\n more information.\n \"\"\"\n try:\n # presumed to be a dictionary\n query = query.items()\n except AttributeError:\n pass\n\n result = ''\n prefix = ''\n\n for (k, v) in query:\n k = _enc(k)\n\n if is_nonstr_iter(v):\n for x in v:\n x = _enc(x)\n result += '%s%s=%s' % (prefix, k, x)\n prefix = '&'\n else:\n v = _enc(v)\n result += '%s%s=%s' % (prefix, k, v)\n\n prefix = '&'\n\n return result\n\ndef _enc(val):\n cls = val.__class__\n if cls is text_type:\n val = val.encode('utf-8')\n elif cls is not binary_type:\n val = str(val).encode('utf-8')\n return quote_plus(val)\n\n", "path": "pyramid/encode.py"}]}
1,307
209
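The behaviour this record's patch adds is easiest to see with a concrete call: a `None` value now serialises as a bare `key=` pair, so `request.route_url('dashboard', _query={'new-user': None})` would yield `/dashboard?new-user=` instead of `?new-user=None`. The snippet below is a stdlib-only approximation of the patched loop (it uses `urllib.parse.quote_plus` rather than pyramid's internal helpers), meant only to show the expected output, not to replicate `pyramid.encode` exactly.

```python
from urllib.parse import quote_plus


def urlencode_sketch(query):
    """Approximate the patched pyramid.encode.urlencode behaviour."""
    if hasattr(query, "items"):
        query = query.items()
    parts = []
    for k, v in query:
        k = quote_plus(str(k))
        if v is None:
            parts.append(f"{k}=")                                  # value-less hint parameter
        elif isinstance(v, (list, tuple, set)):
            parts.extend(f"{k}={quote_plus(str(x))}" for x in v)   # sequences expand to repeated pairs
        else:
            parts.append(f"{k}={quote_plus(str(v))}")
    return "&".join(parts)


if __name__ == "__main__":
    print(urlencode_sketch({"new-user": None}))               # new-user=
    print(urlencode_sketch([("q", "abc"), ("flag", None)]))   # q=abc&flag=
```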
gh_patches_debug_25471
rasdani/github-patches
git_diff
StackStorm__st2-5383
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Trigger name collision workaround This addresses the jinja trigger name collision noted in issue #4641 </issue> <code> [start of contrib/core/actions/inject_trigger.py] 1 # Copyright 2020 The StackStorm Authors. 2 # Copyright 2019 Extreme Networks, Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import absolute_import 17 18 from st2common.runners.base_action import Action 19 20 __all__ = ["InjectTriggerAction"] 21 22 23 class InjectTriggerAction(Action): 24 def run(self, trigger, payload=None, trace_tag=None): 25 payload = payload or {} 26 27 datastore_service = self.action_service.datastore_service 28 client = datastore_service.get_api_client() 29 30 # Dispatch the trigger using the /webhooks/st2 API endpoint 31 # NOTE: Webhooks API endpoint is asynchronous so we don't know if the actual injection 32 # results in a TriggerInstanceDB database object creation or not. The object is created 33 # inside rulesengine service and could fail due to the user providing an invalid trigger 34 # reference or similar. 35 self.logger.debug( 36 'Injecting trigger "%s" with payload="%s"' % (trigger, str(payload)) 37 ) 38 result = client.webhooks.post_generic_webhook( 39 trigger=trigger, payload=payload, trace_tag=trace_tag 40 ) 41 42 return result 43 [end of contrib/core/actions/inject_trigger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/contrib/core/actions/inject_trigger.py b/contrib/core/actions/inject_trigger.py --- a/contrib/core/actions/inject_trigger.py +++ b/contrib/core/actions/inject_trigger.py @@ -21,7 +21,7 @@ class InjectTriggerAction(Action): - def run(self, trigger, payload=None, trace_tag=None): + def run(self, trigger=None, trigger_name=None, payload=None, trace_tag=None): payload = payload or {} datastore_service = self.action_service.datastore_service @@ -32,6 +32,18 @@ # results in a TriggerInstanceDB database object creation or not. The object is created # inside rulesengine service and could fail due to the user providing an invalid trigger # reference or similar. + + # Raise an error if both trigger and trigger_name are specified + if trigger and trigger_name: + raise ValueError( + "Parameters `trigger` and `trigger_name` are mutually exclusive." + ) + + # Raise an error if neither trigger nor trigger_name are specified + if not trigger and not trigger_name: + raise ValueError("You must include the `trigger_name` parameter.") + + trigger = trigger if trigger else trigger_name self.logger.debug( 'Injecting trigger "%s" with payload="%s"' % (trigger, str(payload)) )
{"golden_diff": "diff --git a/contrib/core/actions/inject_trigger.py b/contrib/core/actions/inject_trigger.py\n--- a/contrib/core/actions/inject_trigger.py\n+++ b/contrib/core/actions/inject_trigger.py\n@@ -21,7 +21,7 @@\n \n \n class InjectTriggerAction(Action):\n- def run(self, trigger, payload=None, trace_tag=None):\n+ def run(self, trigger=None, trigger_name=None, payload=None, trace_tag=None):\n payload = payload or {}\n \n datastore_service = self.action_service.datastore_service\n@@ -32,6 +32,18 @@\n # results in a TriggerInstanceDB database object creation or not. The object is created\n # inside rulesengine service and could fail due to the user providing an invalid trigger\n # reference or similar.\n+\n+ # Raise an error if both trigger and trigger_name are specified\n+ if trigger and trigger_name:\n+ raise ValueError(\n+ \"Parameters `trigger` and `trigger_name` are mutually exclusive.\"\n+ )\n+\n+ # Raise an error if neither trigger nor trigger_name are specified\n+ if not trigger and not trigger_name:\n+ raise ValueError(\"You must include the `trigger_name` parameter.\")\n+\n+ trigger = trigger if trigger else trigger_name\n self.logger.debug(\n 'Injecting trigger \"%s\" with payload=\"%s\"' % (trigger, str(payload))\n )\n", "issue": "Trigger name collision workaround\nThis addresses the jinja trigger name collision noted in issue #4641\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2common.runners.base_action import Action\n\n__all__ = [\"InjectTriggerAction\"]\n\n\nclass InjectTriggerAction(Action):\n def run(self, trigger, payload=None, trace_tag=None):\n payload = payload or {}\n\n datastore_service = self.action_service.datastore_service\n client = datastore_service.get_api_client()\n\n # Dispatch the trigger using the /webhooks/st2 API endpoint\n # NOTE: Webhooks API endpoint is asynchronous so we don't know if the actual injection\n # results in a TriggerInstanceDB database object creation or not. The object is created\n # inside rulesengine service and could fail due to the user providing an invalid trigger\n # reference or similar.\n self.logger.debug(\n 'Injecting trigger \"%s\" with payload=\"%s\"' % (trigger, str(payload))\n )\n result = client.webhooks.post_generic_webhook(\n trigger=trigger, payload=payload, trace_tag=trace_tag\n )\n\n return result\n", "path": "contrib/core/actions/inject_trigger.py"}]}
1,001
301
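The substantive part of this record's patch is the argument validation that reconciles `trigger` with the new `trigger_name` parameter. Pulling that logic out of the Action class makes the three cases easy to exercise; the function below is only an illustration (the real action also posts to the st2 webhook API), and the trigger reference in the example is a made-up placeholder, not a real pack's trigger.

```python
def resolve_trigger(trigger=None, trigger_name=None):
    """Mirror the mutual-exclusion check added to InjectTriggerAction.run."""
    if trigger and trigger_name:
        raise ValueError(
            "Parameters `trigger` and `trigger_name` are mutually exclusive."
        )
    if not trigger and not trigger_name:
        raise ValueError("You must include the `trigger_name` parameter.")
    return trigger if trigger else trigger_name


if __name__ == "__main__":
    print(resolve_trigger(trigger_name="examplepack.exampletrigger"))  # placeholder name

    try:
        resolve_trigger(trigger="a", trigger_name="b")
    except ValueError as exc:
        print(exc)  # mutually exclusive

    try:
        resolve_trigger()
    except ValueError as exc:
        print(exc)  # neither supplied
```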
gh_patches_debug_40529
rasdani/github-patches
git_diff
nautobot__nautobot-1148
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove Custom Fields from Admin UI ### Proposed Changes Remove custom fields from Admin UI. This should be as simple as deleting a bunch of code from `nautobot/extras/admin.py` that's no longer needed. ### Justification Now that we have custom field management in the regular UI (#735, #997), the admin UI for custom field management is redundant. </issue> <code> [start of nautobot/extras/admin.py] 1 from db_file_storage.form_widgets import DBAdminClearableFileInput 2 from django import forms 3 from django.contrib import admin, messages 4 from django.db import transaction 5 from django.db.models import ProtectedError 6 7 from .models import CustomField, CustomFieldChoice, FileProxy, JobResult 8 9 10 def order_content_types(field): 11 """ 12 Order the list of available ContentTypes by application 13 """ 14 queryset = field.queryset.order_by("app_label", "model") 15 field.choices = [(ct.pk, "{} > {}".format(ct.app_label, ct.name)) for ct in queryset] 16 17 18 # 19 # Custom fields 20 # 21 22 23 class CustomFieldForm(forms.ModelForm): 24 class Meta: 25 model = CustomField 26 exclude = [] 27 widgets = { 28 "default": forms.TextInput(), 29 "validation_regex": forms.Textarea( 30 attrs={ 31 "cols": 80, 32 "rows": 3, 33 } 34 ), 35 } 36 37 def __init__(self, *args, **kwargs): 38 super().__init__(*args, **kwargs) 39 40 order_content_types(self.fields["content_types"]) 41 42 43 class CustomFieldChoiceAdmin(admin.TabularInline): 44 """ 45 Defines the inline formset factory that handles choices for selection type custom fields. 46 The `extra` defines the default number of inline rows that appear in the UI. 47 """ 48 49 model = CustomFieldChoice 50 extra = 5 51 52 53 @admin.register(CustomField) 54 class CustomFieldAdmin(admin.ModelAdmin): 55 """ 56 Define the structure and composition of the custom field form in the admin panel. 57 """ 58 59 actions = None 60 form = CustomFieldForm 61 inlines = [CustomFieldChoiceAdmin] 62 list_display = [ 63 "name", 64 "models", 65 "type", 66 "required", 67 "filter_logic", 68 "default", 69 "weight", 70 "description", 71 ] 72 list_filter = [ 73 "type", 74 "required", 75 "content_types", 76 ] 77 fieldsets = ( 78 ( 79 "Custom Field", 80 { 81 "fields": ( 82 "type", 83 "name", 84 "weight", 85 "label", 86 "description", 87 "required", 88 "default", 89 "filter_logic", 90 ) 91 }, 92 ), 93 ( 94 "Assignment", 95 { 96 "description": "A custom field must be assigned to one or more object types.", 97 "fields": ("content_types",), 98 }, 99 ), 100 ( 101 "Validation Rules", 102 { 103 "fields": ( 104 "validation_minimum", 105 "validation_maximum", 106 "validation_regex", 107 ), 108 "classes": ("monospace",), 109 }, 110 ), 111 ) 112 113 def models(self, obj): 114 return ", ".join([ct.name for ct in obj.content_types.all()]) 115 116 @transaction.atomic 117 def save_formset(self, request, form, formset, change): 118 # TODO(John): revisit this when custom fields are moved out of admin... there is a better way... 
119 if formset.model != CustomFieldChoice: 120 return super().save_formset(request, form, formset, change) 121 instances = formset.save(commit=False) 122 for instance in instances: 123 instance.save() 124 formset.save_m2m() 125 for obj in formset.deleted_objects: 126 try: 127 obj.delete() 128 except ProtectedError as e: 129 self.message_user(request, e, level=messages.ERROR) 130 raise e 131 132 133 # 134 # File attachments 135 # 136 137 138 class FileProxyForm(forms.ModelForm): 139 class Meta: 140 model = FileProxy 141 exclude = [] 142 widgets = { 143 "file": DBAdminClearableFileInput, 144 } 145 146 147 @admin.register(FileProxy) 148 class FileProxyAdmin(admin.ModelAdmin): 149 form = FileProxyForm 150 list_display = ["name", "uploaded_at"] 151 list_filter = ["uploaded_at"] 152 153 154 # 155 # Job results (jobs, scripts, reports, Git repository sync, etc.) 156 # 157 158 159 @admin.register(JobResult) 160 class JobResultAdmin(admin.ModelAdmin): 161 list_display = [ 162 "obj_type", 163 "name", 164 "created", 165 "completed", 166 "user", 167 "status", 168 ] 169 fields = [ 170 "obj_type", 171 "name", 172 "created", 173 "completed", 174 "user", 175 "status", 176 "data", 177 "job_id", 178 ] 179 list_filter = [ 180 "status", 181 ] 182 readonly_fields = fields 183 184 def has_add_permission(self, request): 185 return False 186 [end of nautobot/extras/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nautobot/extras/admin.py b/nautobot/extras/admin.py --- a/nautobot/extras/admin.py +++ b/nautobot/extras/admin.py @@ -1,10 +1,8 @@ from db_file_storage.form_widgets import DBAdminClearableFileInput from django import forms -from django.contrib import admin, messages -from django.db import transaction -from django.db.models import ProtectedError +from django.contrib import admin -from .models import CustomField, CustomFieldChoice, FileProxy, JobResult +from .models import FileProxy, JobResult def order_content_types(field): @@ -15,121 +13,6 @@ field.choices = [(ct.pk, "{} > {}".format(ct.app_label, ct.name)) for ct in queryset] -# -# Custom fields -# - - -class CustomFieldForm(forms.ModelForm): - class Meta: - model = CustomField - exclude = [] - widgets = { - "default": forms.TextInput(), - "validation_regex": forms.Textarea( - attrs={ - "cols": 80, - "rows": 3, - } - ), - } - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - order_content_types(self.fields["content_types"]) - - -class CustomFieldChoiceAdmin(admin.TabularInline): - """ - Defines the inline formset factory that handles choices for selection type custom fields. - The `extra` defines the default number of inline rows that appear in the UI. - """ - - model = CustomFieldChoice - extra = 5 - - [email protected](CustomField) -class CustomFieldAdmin(admin.ModelAdmin): - """ - Define the structure and composition of the custom field form in the admin panel. - """ - - actions = None - form = CustomFieldForm - inlines = [CustomFieldChoiceAdmin] - list_display = [ - "name", - "models", - "type", - "required", - "filter_logic", - "default", - "weight", - "description", - ] - list_filter = [ - "type", - "required", - "content_types", - ] - fieldsets = ( - ( - "Custom Field", - { - "fields": ( - "type", - "name", - "weight", - "label", - "description", - "required", - "default", - "filter_logic", - ) - }, - ), - ( - "Assignment", - { - "description": "A custom field must be assigned to one or more object types.", - "fields": ("content_types",), - }, - ), - ( - "Validation Rules", - { - "fields": ( - "validation_minimum", - "validation_maximum", - "validation_regex", - ), - "classes": ("monospace",), - }, - ), - ) - - def models(self, obj): - return ", ".join([ct.name for ct in obj.content_types.all()]) - - @transaction.atomic - def save_formset(self, request, form, formset, change): - # TODO(John): revisit this when custom fields are moved out of admin... there is a better way... - if formset.model != CustomFieldChoice: - return super().save_formset(request, form, formset, change) - instances = formset.save(commit=False) - for instance in instances: - instance.save() - formset.save_m2m() - for obj in formset.deleted_objects: - try: - obj.delete() - except ProtectedError as e: - self.message_user(request, e, level=messages.ERROR) - raise e - - # # File attachments #
{"golden_diff": "diff --git a/nautobot/extras/admin.py b/nautobot/extras/admin.py\n--- a/nautobot/extras/admin.py\n+++ b/nautobot/extras/admin.py\n@@ -1,10 +1,8 @@\n from db_file_storage.form_widgets import DBAdminClearableFileInput\n from django import forms\n-from django.contrib import admin, messages\n-from django.db import transaction\n-from django.db.models import ProtectedError\n+from django.contrib import admin\n \n-from .models import CustomField, CustomFieldChoice, FileProxy, JobResult\n+from .models import FileProxy, JobResult\n \n \n def order_content_types(field):\n@@ -15,121 +13,6 @@\n field.choices = [(ct.pk, \"{} > {}\".format(ct.app_label, ct.name)) for ct in queryset]\n \n \n-#\n-# Custom fields\n-#\n-\n-\n-class CustomFieldForm(forms.ModelForm):\n- class Meta:\n- model = CustomField\n- exclude = []\n- widgets = {\n- \"default\": forms.TextInput(),\n- \"validation_regex\": forms.Textarea(\n- attrs={\n- \"cols\": 80,\n- \"rows\": 3,\n- }\n- ),\n- }\n-\n- def __init__(self, *args, **kwargs):\n- super().__init__(*args, **kwargs)\n-\n- order_content_types(self.fields[\"content_types\"])\n-\n-\n-class CustomFieldChoiceAdmin(admin.TabularInline):\n- \"\"\"\n- Defines the inline formset factory that handles choices for selection type custom fields.\n- The `extra` defines the default number of inline rows that appear in the UI.\n- \"\"\"\n-\n- model = CustomFieldChoice\n- extra = 5\n-\n-\[email protected](CustomField)\n-class CustomFieldAdmin(admin.ModelAdmin):\n- \"\"\"\n- Define the structure and composition of the custom field form in the admin panel.\n- \"\"\"\n-\n- actions = None\n- form = CustomFieldForm\n- inlines = [CustomFieldChoiceAdmin]\n- list_display = [\n- \"name\",\n- \"models\",\n- \"type\",\n- \"required\",\n- \"filter_logic\",\n- \"default\",\n- \"weight\",\n- \"description\",\n- ]\n- list_filter = [\n- \"type\",\n- \"required\",\n- \"content_types\",\n- ]\n- fieldsets = (\n- (\n- \"Custom Field\",\n- {\n- \"fields\": (\n- \"type\",\n- \"name\",\n- \"weight\",\n- \"label\",\n- \"description\",\n- \"required\",\n- \"default\",\n- \"filter_logic\",\n- )\n- },\n- ),\n- (\n- \"Assignment\",\n- {\n- \"description\": \"A custom field must be assigned to one or more object types.\",\n- \"fields\": (\"content_types\",),\n- },\n- ),\n- (\n- \"Validation Rules\",\n- {\n- \"fields\": (\n- \"validation_minimum\",\n- \"validation_maximum\",\n- \"validation_regex\",\n- ),\n- \"classes\": (\"monospace\",),\n- },\n- ),\n- )\n-\n- def models(self, obj):\n- return \", \".join([ct.name for ct in obj.content_types.all()])\n-\n- @transaction.atomic\n- def save_formset(self, request, form, formset, change):\n- # TODO(John): revisit this when custom fields are moved out of admin... there is a better way...\n- if formset.model != CustomFieldChoice:\n- return super().save_formset(request, form, formset, change)\n- instances = formset.save(commit=False)\n- for instance in instances:\n- instance.save()\n- formset.save_m2m()\n- for obj in formset.deleted_objects:\n- try:\n- obj.delete()\n- except ProtectedError as e:\n- self.message_user(request, e, level=messages.ERROR)\n- raise e\n-\n-\n #\n # File attachments\n #\n", "issue": "Remove Custom Fields from Admin UI\n### Proposed Changes\r\n\r\nRemove custom fields from Admin UI. 
This should be as simple as deleting a bunch of code from `nautobot/extras/admin.py` that's no longer needed.\r\n\r\n### Justification\r\n\r\nNow that we have custom field management in the regular UI (#735, #997), the admin UI for custom field management is redundant.\n", "before_files": [{"content": "from db_file_storage.form_widgets import DBAdminClearableFileInput\nfrom django import forms\nfrom django.contrib import admin, messages\nfrom django.db import transaction\nfrom django.db.models import ProtectedError\n\nfrom .models import CustomField, CustomFieldChoice, FileProxy, JobResult\n\n\ndef order_content_types(field):\n \"\"\"\n Order the list of available ContentTypes by application\n \"\"\"\n queryset = field.queryset.order_by(\"app_label\", \"model\")\n field.choices = [(ct.pk, \"{} > {}\".format(ct.app_label, ct.name)) for ct in queryset]\n\n\n#\n# Custom fields\n#\n\n\nclass CustomFieldForm(forms.ModelForm):\n class Meta:\n model = CustomField\n exclude = []\n widgets = {\n \"default\": forms.TextInput(),\n \"validation_regex\": forms.Textarea(\n attrs={\n \"cols\": 80,\n \"rows\": 3,\n }\n ),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n order_content_types(self.fields[\"content_types\"])\n\n\nclass CustomFieldChoiceAdmin(admin.TabularInline):\n \"\"\"\n Defines the inline formset factory that handles choices for selection type custom fields.\n The `extra` defines the default number of inline rows that appear in the UI.\n \"\"\"\n\n model = CustomFieldChoice\n extra = 5\n\n\[email protected](CustomField)\nclass CustomFieldAdmin(admin.ModelAdmin):\n \"\"\"\n Define the structure and composition of the custom field form in the admin panel.\n \"\"\"\n\n actions = None\n form = CustomFieldForm\n inlines = [CustomFieldChoiceAdmin]\n list_display = [\n \"name\",\n \"models\",\n \"type\",\n \"required\",\n \"filter_logic\",\n \"default\",\n \"weight\",\n \"description\",\n ]\n list_filter = [\n \"type\",\n \"required\",\n \"content_types\",\n ]\n fieldsets = (\n (\n \"Custom Field\",\n {\n \"fields\": (\n \"type\",\n \"name\",\n \"weight\",\n \"label\",\n \"description\",\n \"required\",\n \"default\",\n \"filter_logic\",\n )\n },\n ),\n (\n \"Assignment\",\n {\n \"description\": \"A custom field must be assigned to one or more object types.\",\n \"fields\": (\"content_types\",),\n },\n ),\n (\n \"Validation Rules\",\n {\n \"fields\": (\n \"validation_minimum\",\n \"validation_maximum\",\n \"validation_regex\",\n ),\n \"classes\": (\"monospace\",),\n },\n ),\n )\n\n def models(self, obj):\n return \", \".join([ct.name for ct in obj.content_types.all()])\n\n @transaction.atomic\n def save_formset(self, request, form, formset, change):\n # TODO(John): revisit this when custom fields are moved out of admin... 
there is a better way...\n if formset.model != CustomFieldChoice:\n return super().save_formset(request, form, formset, change)\n instances = formset.save(commit=False)\n for instance in instances:\n instance.save()\n formset.save_m2m()\n for obj in formset.deleted_objects:\n try:\n obj.delete()\n except ProtectedError as e:\n self.message_user(request, e, level=messages.ERROR)\n raise e\n\n\n#\n# File attachments\n#\n\n\nclass FileProxyForm(forms.ModelForm):\n class Meta:\n model = FileProxy\n exclude = []\n widgets = {\n \"file\": DBAdminClearableFileInput,\n }\n\n\[email protected](FileProxy)\nclass FileProxyAdmin(admin.ModelAdmin):\n form = FileProxyForm\n list_display = [\"name\", \"uploaded_at\"]\n list_filter = [\"uploaded_at\"]\n\n\n#\n# Job results (jobs, scripts, reports, Git repository sync, etc.)\n#\n\n\[email protected](JobResult)\nclass JobResultAdmin(admin.ModelAdmin):\n list_display = [\n \"obj_type\",\n \"name\",\n \"created\",\n \"completed\",\n \"user\",\n \"status\",\n ]\n fields = [\n \"obj_type\",\n \"name\",\n \"created\",\n \"completed\",\n \"user\",\n \"status\",\n \"data\",\n \"job_id\",\n ]\n list_filter = [\n \"status\",\n ]\n readonly_fields = fields\n\n def has_add_permission(self, request):\n return False\n", "path": "nautobot/extras/admin.py"}]}
2,016
876
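Because this record's change is pure removal, the clearest companion to the diff is a regression check that the custom-field model really is gone from the admin registry. The test below is a sketch along those lines: it assumes a configured Nautobot/Django test environment (settings, installed apps), so it cannot run from this document alone, and the test-class name is invented for illustration.

```python
from django.contrib import admin
from django.test import SimpleTestCase

from nautobot.extras.models import CustomField


class CustomFieldAdminRemovalTestCase(SimpleTestCase):
    """After the patch, custom fields are managed only in the regular UI."""

    def test_customfield_not_registered_with_admin(self):
        # FileProxy and JobResult keep their admin pages; CustomField should not.
        self.assertFalse(admin.site.is_registered(CustomField))
```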
gh_patches_debug_14993
rasdani/github-patches
git_diff
PrefectHQ__prefect-1583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add example code block to `switch` docstring I recently realized I hadn't touched the `switch` code in a long time, and I would've really appreciated an example to work off of. Instead, I ended up looking at our tests which most users won't want to do. Relevant doc: https://docs.prefect.io/api/unreleased/tasks/control_flow.html#prefect-tasks-control-flow-conditional-switch </issue> <code> [start of src/prefect/tasks/control_flow/conditional.py] 1 from typing import Any, Dict 2 3 import prefect 4 from prefect import Task 5 from prefect.engine import signals 6 from prefect.engine.result import NoResult 7 8 __all__ = ["switch", "ifelse"] 9 10 11 class Merge(Task): 12 def __init__(self, **kwargs) -> None: 13 if kwargs.setdefault("skip_on_upstream_skip", False): 14 raise ValueError("Merge tasks must have `skip_on_upstream_skip=False`.") 15 super().__init__(**kwargs) 16 17 def run(self, **task_results: Any) -> Any: 18 return next((v for v in task_results.values() if v != NoResult), None) 19 20 21 class CompareValue(Task): 22 """ 23 This task stores a `value` at initialization and compares it to a `value` received at runtime. 24 If the values don't match, it raises a SKIP exception. 25 26 Args: 27 - value (Any): the value this task will attempt to match when it runs 28 - **kwargs: keyword arguments for the Task 29 """ 30 31 def __init__(self, value: Any, **kwargs: Any): 32 self.value = value 33 kwargs.setdefault("name", 'CompareValue: "{}"'.format(value)) 34 super().__init__(**kwargs) 35 36 def run(self, value: Any) -> None: 37 """ 38 Raises a SKIP signal if the passed value does not match the task's match value; 39 succeeds silently otherwise. 40 41 Args: 42 - value (Any): the value that will be matched against the task's value. 43 """ 44 if value != self.value: 45 raise signals.SKIP( 46 'Provided value "{}" did not match "{}"'.format(value, self.value) 47 ) 48 49 50 def switch(condition: Task, cases: Dict[Any, Task]) -> None: 51 """ 52 Adds a SWITCH to a workflow. 53 54 The condition task is evaluated and the result is compared to the keys of the cases 55 dictionary. The task corresponding to the matching key is run; all other tasks are 56 skipped. Any tasks downstream of the skipped tasks are also skipped unless they set 57 `skip_on_upstream_skip=False`. 58 59 Args: 60 - condition (Task): a task whose result forms the condition for the switch 61 - cases (Dict[Any, Task]): a dict representing the "case" statements of the switch. 62 The value of the `condition` task will be compared to the keys of this dict, and 63 the matching task will be executed. 64 65 Raises: 66 - PrefectWarning: if any of the tasks in "cases" have upstream dependencies, 67 then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this 68 is passing a list of tasks as one of the cases, which adds the `List` task 69 to the switch condition but leaves the tasks themselves upstream. 70 """ 71 72 with prefect.tags("switch"): 73 for value, task in cases.items(): 74 task = prefect.utilities.tasks.as_task(task) 75 match_condition = CompareValue(value=value).bind(value=condition) 76 task.set_dependencies(upstream_tasks=[match_condition]) 77 78 79 def ifelse(condition: Task, true_task: Task, false_task: Task) -> None: 80 """ 81 Builds a conditional branch into a workflow. 82 83 If the condition evaluates True(ish), the true_task will run. 
If it 84 evaluates False(ish), the false_task will run. The task doesn't run is Skipped, as are 85 all downstream tasks that don't set `skip_on_upstream_skip=False`. 86 87 Args: 88 - condition (Task): a task whose boolean result forms the condition for the ifelse 89 - true_task (Task): a task that will be executed if the condition is True 90 - false_task (Task): a task that will be executed if the condition is False 91 """ 92 93 switch(condition=condition, cases={True: true_task, False: false_task}) 94 95 96 def merge(*tasks: Task) -> Task: 97 """ 98 Merges conditional branches back together. 99 100 A conditional branch in a flow results in one or more tasks proceeding and one or 101 more tasks skipping. It is often convenient to merge those branches back into a 102 single result. This function is a simple way to achieve that goal. 103 104 The merge will return the first real result it encounters, or `None`. If multiple 105 tasks might return a result, group them with a list. 106 107 Example: 108 ```python 109 with Flow("My Flow"): 110 true_branch = ActionIfTrue() 111 false_branch = ActionIfFalse() 112 ifelse(CheckCondition(), true_branch, false_branch) 113 114 merged_result = merge(true_branch, false_branch) 115 ``` 116 117 Args: 118 - *tasks (Task): tasks whose results should be merged into a single result. The tasks are 119 assumed to all sit downstream of different `switch` branches, such that only 120 one of them will contain a result and the others will all be skipped. 121 122 Returns: 123 - Task: a Task representing the merged result. 124 125 """ 126 return Merge().bind(**{"task_{}".format(i + 1): t for i, t in enumerate(tasks)}) 127 [end of src/prefect/tasks/control_flow/conditional.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/prefect/tasks/control_flow/conditional.py b/src/prefect/tasks/control_flow/conditional.py --- a/src/prefect/tasks/control_flow/conditional.py +++ b/src/prefect/tasks/control_flow/conditional.py @@ -56,6 +56,24 @@ skipped. Any tasks downstream of the skipped tasks are also skipped unless they set `skip_on_upstream_skip=False`. + Example: + ```python + @task + def condition(): + return "b" # returning 'b' will take the b_branch + + @task + def a_branch(): + return "A Branch" + + @task + def b_branch(): + return "B Branch" + + with Flow("switch-flow") as flow: + switch(condition, dict(a=a_branch, b=b_branch)) + ``` + Args: - condition (Task): a task whose result forms the condition for the switch - cases (Dict[Any, Task]): a dict representing the "case" statements of the switch.
{"golden_diff": "diff --git a/src/prefect/tasks/control_flow/conditional.py b/src/prefect/tasks/control_flow/conditional.py\n--- a/src/prefect/tasks/control_flow/conditional.py\n+++ b/src/prefect/tasks/control_flow/conditional.py\n@@ -56,6 +56,24 @@\n skipped. Any tasks downstream of the skipped tasks are also skipped unless they set\n `skip_on_upstream_skip=False`.\n \n+ Example:\n+ ```python\n+ @task\n+ def condition():\n+ return \"b\" # returning 'b' will take the b_branch\n+\n+ @task\n+ def a_branch():\n+ return \"A Branch\"\n+\n+ @task\n+ def b_branch():\n+ return \"B Branch\"\n+\n+ with Flow(\"switch-flow\") as flow:\n+ switch(condition, dict(a=a_branch, b=b_branch))\n+ ```\n+\n Args:\n - condition (Task): a task whose result forms the condition for the switch\n - cases (Dict[Any, Task]): a dict representing the \"case\" statements of the switch.\n", "issue": "Add example code block to `switch` docstring\nI recently realized I hadn't touched the `switch` code in a long time, and I would've really appreciated an example to work off of. Instead, I ended up looking at our tests which most users won't want to do. Relevant doc: https://docs.prefect.io/api/unreleased/tasks/control_flow.html#prefect-tasks-control-flow-conditional-switch\n", "before_files": [{"content": "from typing import Any, Dict\n\nimport prefect\nfrom prefect import Task\nfrom prefect.engine import signals\nfrom prefect.engine.result import NoResult\n\n__all__ = [\"switch\", \"ifelse\"]\n\n\nclass Merge(Task):\n def __init__(self, **kwargs) -> None:\n if kwargs.setdefault(\"skip_on_upstream_skip\", False):\n raise ValueError(\"Merge tasks must have `skip_on_upstream_skip=False`.\")\n super().__init__(**kwargs)\n\n def run(self, **task_results: Any) -> Any:\n return next((v for v in task_results.values() if v != NoResult), None)\n\n\nclass CompareValue(Task):\n \"\"\"\n This task stores a `value` at initialization and compares it to a `value` received at runtime.\n If the values don't match, it raises a SKIP exception.\n\n Args:\n - value (Any): the value this task will attempt to match when it runs\n - **kwargs: keyword arguments for the Task\n \"\"\"\n\n def __init__(self, value: Any, **kwargs: Any):\n self.value = value\n kwargs.setdefault(\"name\", 'CompareValue: \"{}\"'.format(value))\n super().__init__(**kwargs)\n\n def run(self, value: Any) -> None:\n \"\"\"\n Raises a SKIP signal if the passed value does not match the task's match value;\n succeeds silently otherwise.\n\n Args:\n - value (Any): the value that will be matched against the task's value.\n \"\"\"\n if value != self.value:\n raise signals.SKIP(\n 'Provided value \"{}\" did not match \"{}\"'.format(value, self.value)\n )\n\n\ndef switch(condition: Task, cases: Dict[Any, Task]) -> None:\n \"\"\"\n Adds a SWITCH to a workflow.\n\n The condition task is evaluated and the result is compared to the keys of the cases\n dictionary. The task corresponding to the matching key is run; all other tasks are\n skipped. 
Any tasks downstream of the skipped tasks are also skipped unless they set\n `skip_on_upstream_skip=False`.\n\n Args:\n - condition (Task): a task whose result forms the condition for the switch\n - cases (Dict[Any, Task]): a dict representing the \"case\" statements of the switch.\n The value of the `condition` task will be compared to the keys of this dict, and\n the matching task will be executed.\n\n Raises:\n - PrefectWarning: if any of the tasks in \"cases\" have upstream dependencies,\n then this task will warn that those upstream tasks may run whether or not the switch condition matches their branch. The most common cause of this\n is passing a list of tasks as one of the cases, which adds the `List` task\n to the switch condition but leaves the tasks themselves upstream.\n \"\"\"\n\n with prefect.tags(\"switch\"):\n for value, task in cases.items():\n task = prefect.utilities.tasks.as_task(task)\n match_condition = CompareValue(value=value).bind(value=condition)\n task.set_dependencies(upstream_tasks=[match_condition])\n\n\ndef ifelse(condition: Task, true_task: Task, false_task: Task) -> None:\n \"\"\"\n Builds a conditional branch into a workflow.\n\n If the condition evaluates True(ish), the true_task will run. If it\n evaluates False(ish), the false_task will run. The task doesn't run is Skipped, as are\n all downstream tasks that don't set `skip_on_upstream_skip=False`.\n\n Args:\n - condition (Task): a task whose boolean result forms the condition for the ifelse\n - true_task (Task): a task that will be executed if the condition is True\n - false_task (Task): a task that will be executed if the condition is False\n \"\"\"\n\n switch(condition=condition, cases={True: true_task, False: false_task})\n\n\ndef merge(*tasks: Task) -> Task:\n \"\"\"\n Merges conditional branches back together.\n\n A conditional branch in a flow results in one or more tasks proceeding and one or\n more tasks skipping. It is often convenient to merge those branches back into a\n single result. This function is a simple way to achieve that goal.\n\n The merge will return the first real result it encounters, or `None`. If multiple\n tasks might return a result, group them with a list.\n\n Example:\n ```python\n with Flow(\"My Flow\"):\n true_branch = ActionIfTrue()\n false_branch = ActionIfFalse()\n ifelse(CheckCondition(), true_branch, false_branch)\n\n merged_result = merge(true_branch, false_branch)\n ```\n\n Args:\n - *tasks (Task): tasks whose results should be merged into a single result. The tasks are\n assumed to all sit downstream of different `switch` branches, such that only\n one of them will contain a result and the others will all be skipped.\n\n Returns:\n - Task: a Task representing the merged result.\n\n \"\"\"\n return Merge().bind(**{\"task_{}\".format(i + 1): t for i, t in enumerate(tasks)})\n", "path": "src/prefect/tasks/control_flow/conditional.py"}]}
2,006
239
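The golden diff for this record adds a docstring example but stops short of a complete script. Below is a runnable restatement against the Prefect 0.x-era API this module belongs to (`prefect.Flow`, `prefect.task`, and the `conditional` module shown in the record); on Prefect 2.x and later these imports no longer exist, and the exact way task results are inspected after `flow.run()` varies by minor version, so treat it as a version-specific sketch.

```python
from prefect import Flow, task
from prefect.tasks.control_flow.conditional import merge, switch


@task
def condition():
    return "b"  # returning "b" selects the b_branch case below


@task
def a_branch():
    return "A Branch"


@task
def b_branch():
    return "B Branch"


with Flow("switch-flow") as flow:
    cond = condition()
    a = a_branch()
    b = b_branch()
    switch(cond, dict(a=a, b=b))
    merged = merge(a, b)  # collapses the branches back into one result

if __name__ == "__main__":
    final_state = flow.run()
    # Expected: b_branch succeeds with "B Branch", a_branch is Skipped,
    # and the merge task carries the surviving result forward.
    print(final_state)
```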
gh_patches_debug_30157
rasdani/github-patches
git_diff
xonsh__xonsh-3796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bad documentation or bug: _.rtn does not work [In the Documentation](https://xon.sh/bash_to_xsh.html) you write that `_.rtn` is the equivalent of the shell `$?` and that it `Returns the exit code, or status, of the previous command.`. Either I understand the documentation wrong or there is a bug: ``` #!/usr/bin/env xonsh echo "abc" print(_.rtn) ``` Outputs ``` abc Traceback (most recent call last): File "/home/volker/.local/bin/xonsh", line 8, in <module> sys.exit(main()) File "/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py", line 426, in main _failback_to_other_shells(args, err) File "/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py", line 373, in _failback_to_other_shells raise err File "/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py", line 424, in main sys.exit(main_xonsh(args)) File "/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py", line 471, in main_xonsh run_script_with_cache( File "/home/volker/.local/lib/python3.8/site-packages/xonsh/codecache.py", line 162, in run_script_with_cache run_compiled_code(ccode, glb, loc, mode) File "/home/volker/.local/lib/python3.8/site-packages/xonsh/codecache.py", line 67, in run_compiled_code func(code, glb, loc) File "./generateIso.xonsh", line 24, in <module> print(_.rtn) NameError: name '_' is not defined ``` </issue> <code> [start of xontrib/bashisms.py] 1 """Bash-like interface extensions for xonsh.""" 2 import shlex 3 import sys 4 import re 5 import builtins 6 7 8 __all__ = () 9 10 11 @events.on_transform_command 12 def bash_preproc(cmd, **kw): 13 bang_previous = { 14 "!": lambda x: x, 15 "$": lambda x: shlex.split(x)[-1], 16 "^": lambda x: shlex.split(x)[0], 17 "*": lambda x: " ".join(shlex.split(x)[1:]), 18 } 19 20 def replace_bang(m): 21 arg = m.group(1) 22 inputs = __xonsh__.history.inps 23 24 # Dissect the previous command. 25 if arg in bang_previous: 26 try: 27 return bang_previous[arg](inputs[-1]) 28 except IndexError: 29 print("xonsh: no history for '!{}'".format(arg)) 30 return "" 31 32 # Look back in history for a matching command. 33 else: 34 try: 35 return next((x for x in reversed(inputs) if x.startswith(arg))) 36 except StopIteration: 37 print("xonsh: no previous commands match '!{}'".format(arg)) 38 return "" 39 40 return re.sub(r"!([!$^*]|[\w]+)", replace_bang, cmd) 41 42 43 def alias(args, stdin=None): 44 ret = 0 45 46 if args: 47 for arg in args: 48 if "=" in arg: 49 # shlex.split to remove quotes, e.g. "foo='echo hey'" into 50 # "foo=echo hey" 51 name, cmd = shlex.split(arg)[0].split("=", 1) 52 aliases[name] = shlex.split(cmd) 53 elif arg in aliases: 54 print("{}={}".format(arg, aliases[arg])) 55 else: 56 print("alias: {}: not found".format(arg), file=sys.stderr) 57 ret = 1 58 else: 59 for alias, cmd in aliases.items(): 60 print("{}={}".format(alias, cmd)) 61 62 return ret 63 64 65 aliases["alias"] = alias 66 builtins.__xonsh__.env["THREAD_SUBPROCS"] = False 67 [end of xontrib/bashisms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xontrib/bashisms.py b/xontrib/bashisms.py --- a/xontrib/bashisms.py +++ b/xontrib/bashisms.py @@ -64,3 +64,86 @@ aliases["alias"] = alias builtins.__xonsh__.env["THREAD_SUBPROCS"] = False + + +def _unset(args): + if not args: + print("Usage: unset ENV_VARIABLE", file=sys.stderr) + + for v in args: + try: + __xonsh__.env.pop(v) + except KeyError: + print(f"{v} not found", file=sys.stderr) + + +aliases["unset"] = _unset + + +def _export(args): + if not args: + print("Usage: export ENV_VARIABLE=VALUE", file=sys.stderr) + + for eq in args: + if "=" in eq: + name, val = shlex.split(eq)[0].split("=", 1) + __xonsh__.env[name] = val + else: + print(f"{eq} equal sign not found", file=sys.stderr) + + +aliases["export"] = _export + + +def _set(args): + arg = args[0] + if arg == "-e": + __xonsh__.env["RAISE_SUBPROC_ERROR"] = True + elif arg == "+e": + __xonsh__.env["RAISE_SUBPROC_ERROR"] = False + elif arg == "-x": + __xonsh__.env["XONSH_TRACE_SUBPROC"] = True + elif arg == "+x": + __xonsh__.env["XONSH_TRACE_SUBPROC"] = False + else: + print( + "Not supported in xontrib bashisms.\nPRs are welcome - https://github.com/xonsh/xonsh/blob/master/xontrib/bashisms.py", + file=sys.stderr, + ) + + +aliases["set"] = _set + + +def _shopt(args): + + supported_shopt = ["DOTGLOB"] + + args_len = len(args) + if args_len == 0: + for so in supported_shopt: + onoff = "on" if so in __xonsh__.env and __xonsh__.env[so] else "off" + print(f"dotglob\t{onoff}") + return + elif args_len < 2 or args[0] in ["-h", "--help"]: + print(f'Usage: shopt <-s|-u> <{"|".join(supported_shopt).lower()}>') + return + + opt = args[0] + optname = args[1] + + if opt == "-s" and optname == "dotglob": + __xonsh__.env["DOTGLOB"] = True + elif opt == "-u" and optname == "dotglob": + __xonsh__.env["DOTGLOB"] = False + else: + print( + "Not supported in xontrib bashisms.\nPRs are welcome - https://github.com/xonsh/xonsh/blob/master/xontrib/bashisms.py", + file=sys.stderr, + ) + + +aliases["shopt"] = _shopt + + +aliases["complete"] = "completer list"
{"golden_diff": "diff --git a/xontrib/bashisms.py b/xontrib/bashisms.py\n--- a/xontrib/bashisms.py\n+++ b/xontrib/bashisms.py\n@@ -64,3 +64,86 @@\n \n aliases[\"alias\"] = alias\n builtins.__xonsh__.env[\"THREAD_SUBPROCS\"] = False\n+\n+\n+def _unset(args):\n+ if not args:\n+ print(\"Usage: unset ENV_VARIABLE\", file=sys.stderr)\n+\n+ for v in args:\n+ try:\n+ __xonsh__.env.pop(v)\n+ except KeyError:\n+ print(f\"{v} not found\", file=sys.stderr)\n+\n+\n+aliases[\"unset\"] = _unset\n+\n+\n+def _export(args):\n+ if not args:\n+ print(\"Usage: export ENV_VARIABLE=VALUE\", file=sys.stderr)\n+\n+ for eq in args:\n+ if \"=\" in eq:\n+ name, val = shlex.split(eq)[0].split(\"=\", 1)\n+ __xonsh__.env[name] = val\n+ else:\n+ print(f\"{eq} equal sign not found\", file=sys.stderr)\n+\n+\n+aliases[\"export\"] = _export\n+\n+\n+def _set(args):\n+ arg = args[0]\n+ if arg == \"-e\":\n+ __xonsh__.env[\"RAISE_SUBPROC_ERROR\"] = True\n+ elif arg == \"+e\":\n+ __xonsh__.env[\"RAISE_SUBPROC_ERROR\"] = False\n+ elif arg == \"-x\":\n+ __xonsh__.env[\"XONSH_TRACE_SUBPROC\"] = True\n+ elif arg == \"+x\":\n+ __xonsh__.env[\"XONSH_TRACE_SUBPROC\"] = False\n+ else:\n+ print(\n+ \"Not supported in xontrib bashisms.\\nPRs are welcome - https://github.com/xonsh/xonsh/blob/master/xontrib/bashisms.py\",\n+ file=sys.stderr,\n+ )\n+\n+\n+aliases[\"set\"] = _set\n+\n+\n+def _shopt(args):\n+\n+ supported_shopt = [\"DOTGLOB\"]\n+\n+ args_len = len(args)\n+ if args_len == 0:\n+ for so in supported_shopt:\n+ onoff = \"on\" if so in __xonsh__.env and __xonsh__.env[so] else \"off\"\n+ print(f\"dotglob\\t{onoff}\")\n+ return\n+ elif args_len < 2 or args[0] in [\"-h\", \"--help\"]:\n+ print(f'Usage: shopt <-s|-u> <{\"|\".join(supported_shopt).lower()}>')\n+ return\n+\n+ opt = args[0]\n+ optname = args[1]\n+\n+ if opt == \"-s\" and optname == \"dotglob\":\n+ __xonsh__.env[\"DOTGLOB\"] = True\n+ elif opt == \"-u\" and optname == \"dotglob\":\n+ __xonsh__.env[\"DOTGLOB\"] = False\n+ else:\n+ print(\n+ \"Not supported in xontrib bashisms.\\nPRs are welcome - https://github.com/xonsh/xonsh/blob/master/xontrib/bashisms.py\",\n+ file=sys.stderr,\n+ )\n+\n+\n+aliases[\"shopt\"] = _shopt\n+\n+\n+aliases[\"complete\"] = \"completer list\"\n", "issue": "Bad documentation or bug: _.rtn does not work\n[In the Documentation](https://xon.sh/bash_to_xsh.html) you write that `_.rtn` is the equivalent of the shell `$?` and that it `Returns the exit code, or status, of the previous command.`. 
Either I understand the documentation wrong or there is a bug:\r\n```\r\n#!/usr/bin/env xonsh\r\necho \"abc\"\r\nprint(_.rtn)\r\n```\r\nOutputs\r\n```\r\nabc\r\nTraceback (most recent call last):\r\n File \"/home/volker/.local/bin/xonsh\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py\", line 426, in main\r\n _failback_to_other_shells(args, err)\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py\", line 373, in _failback_to_other_shells\r\n raise err\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py\", line 424, in main\r\n sys.exit(main_xonsh(args))\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/main.py\", line 471, in main_xonsh\r\n run_script_with_cache(\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/codecache.py\", line 162, in run_script_with_cache\r\n run_compiled_code(ccode, glb, loc, mode)\r\n File \"/home/volker/.local/lib/python3.8/site-packages/xonsh/codecache.py\", line 67, in run_compiled_code\r\n func(code, glb, loc)\r\n File \"./generateIso.xonsh\", line 24, in <module>\r\n print(_.rtn)\r\nNameError: name '_' is not defined\r\n```\n", "before_files": [{"content": "\"\"\"Bash-like interface extensions for xonsh.\"\"\"\nimport shlex\nimport sys\nimport re\nimport builtins\n\n\n__all__ = ()\n\n\[email protected]_transform_command\ndef bash_preproc(cmd, **kw):\n bang_previous = {\n \"!\": lambda x: x,\n \"$\": lambda x: shlex.split(x)[-1],\n \"^\": lambda x: shlex.split(x)[0],\n \"*\": lambda x: \" \".join(shlex.split(x)[1:]),\n }\n\n def replace_bang(m):\n arg = m.group(1)\n inputs = __xonsh__.history.inps\n\n # Dissect the previous command.\n if arg in bang_previous:\n try:\n return bang_previous[arg](inputs[-1])\n except IndexError:\n print(\"xonsh: no history for '!{}'\".format(arg))\n return \"\"\n\n # Look back in history for a matching command.\n else:\n try:\n return next((x for x in reversed(inputs) if x.startswith(arg)))\n except StopIteration:\n print(\"xonsh: no previous commands match '!{}'\".format(arg))\n return \"\"\n\n return re.sub(r\"!([!$^*]|[\\w]+)\", replace_bang, cmd)\n\n\ndef alias(args, stdin=None):\n ret = 0\n\n if args:\n for arg in args:\n if \"=\" in arg:\n # shlex.split to remove quotes, e.g. \"foo='echo hey'\" into\n # \"foo=echo hey\"\n name, cmd = shlex.split(arg)[0].split(\"=\", 1)\n aliases[name] = shlex.split(cmd)\n elif arg in aliases:\n print(\"{}={}\".format(arg, aliases[arg]))\n else:\n print(\"alias: {}: not found\".format(arg), file=sys.stderr)\n ret = 1\n else:\n for alias, cmd in aliases.items():\n print(\"{}={}\".format(alias, cmd))\n\n return ret\n\n\naliases[\"alias\"] = alias\nbuiltins.__xonsh__.env[\"THREAD_SUBPROCS\"] = False\n", "path": "xontrib/bashisms.py"}]}
1,542
752
gh_patches_debug_30656
rasdani/github-patches
git_diff
rucio__rucio-2150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test reaper console script Motivation ---------- The reaper console script `rucio-reaper` is not tested in the testsuite. Modification ------------ - Add test for the reaper console script. - Install the environnement with `python setup.py develop` in the docker env to have the generated console scripts available in the docker. - Extend the reaper argparse method and the reaper tests to validate the argparse main method and console script. </issue> <code> [start of lib/rucio/clis/daemons/reaper/reaper.py] 1 # Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 # Authors: 16 # - Vincent Garonne, <[email protected]>, 2012-2018 17 # - Wen Guan, <[email protected]>, 2014 18 # - Hannes Hansen, <[email protected]>, 2018 19 20 """ 21 Reaper is a daemon to manage file deletion 22 """ 23 24 import argparse 25 import signal 26 27 from rucio.daemons.reaper.reaper import run, stop 28 29 30 def get_parser(): 31 """ 32 Returns the argparse parser. 33 """ 34 parser = argparse.ArgumentParser(description="The Reaper daemon is responsible for replica deletion. It deletes them by checking if there are replicas that are not locked and have a tombstone to indicate that they can be deleted.", epilog=''' 35 Upload a file and prepare the rules and replicas for deletion by using the judge-cleaner daemon:: 36 37 $ rucio upload --rse MOCK --scope mock --name file filename.txt 38 $ rucio add-rule mock:file 1 MOCK2 --lifetime 1 39 $ rucio-judge-cleaner --run-once 40 41 Check if the replica was created:: 42 43 $ rucio list-file-replica mock:file 44 +---------+--------+------------+-----------+---------------------------------------------------------+ 45 | SCOPE | NAME | FILESIZE | ADLER32 | RSE: REPLICA | 46 |---------+--------+------------+-----------+---------------------------------------------------------| 47 | mock | file | 1.542 kB | 1268ee71 | MOCK: file://localhost:0/tmp/rucio_rse/mock/15/58/file | 48 +---------+--------+------------+-----------+---------------------------------------------------------+ 49 50 Run the daemon:: 51 52 $ rucio-reaper --run-once 53 54 Check if the replica exists:: 55 56 $ rucio list-file-replica mock:file 57 +---------+--------+------------+-----------+---------------------------------------------------------+ 58 | SCOPE | NAME | FILESIZE | ADLER32 | RSE: REPLICA | 59 |---------+--------+------------+-----------+---------------------------------------------------------| 60 +---------+--------+------------+-----------+---------------------------------------------------------+ 61 ''') 62 parser.add_argument("--run-once", action="store_true", default=False, help='One iteration only') 63 parser.add_argument("--total-workers", action="store", default=1, type=int, help='Total number of workers per process') 64 parser.add_argument("--threads-per-worker", action="store", default=None, type=int, 
help='Total number of threads created by each worker') 65 parser.add_argument("--chunk-size", action="store", default=10, type=int, help='Chunk size') 66 parser.add_argument("--scheme", action="store", default=None, type=str, help='Force the reaper to use a particular protocol, e.g., mock.') 67 parser.add_argument('--greedy', action='store_true', default=False, help='Greedy mode') 68 parser.add_argument('--exclude-rses', action="store", default=None, type=str, help='RSEs expression to exclude RSEs') 69 parser.add_argument('--include-rses', action="store", default=None, type=str, help='RSEs expression to include RSEs') 70 parser.add_argument('--rses', nargs='+', type=str, help='List of RSEs') 71 parser.add_argument('--delay-seconds', action="store", default=3600, type=int, help='Delay to retry failed deletion') 72 return parser 73 74 75 def main(): 76 77 signal.signal(signal.SIGTERM, stop) 78 parser = get_parser() 79 args = parser.parse_args() 80 try: 81 run(total_workers=args.total_workers, chunk_size=args.chunk_size, greedy=args.greedy, 82 once=args.run_once, scheme=args.scheme, rses=args.rses, threads_per_worker=args.threads_per_worker, 83 exclude_rses=args.exclude_rses, include_rses=args.include_rses, delay_seconds=args.delay_seconds) 84 except KeyboardInterrupt: 85 stop() 86 [end of lib/rucio/clis/daemons/reaper/reaper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/clis/daemons/reaper/reaper.py b/lib/rucio/clis/daemons/reaper/reaper.py --- a/lib/rucio/clis/daemons/reaper/reaper.py +++ b/lib/rucio/clis/daemons/reaper/reaper.py @@ -1,4 +1,4 @@ -# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration. +# Copyright 2012-2019 CERN for the benefit of the ATLAS collaboration. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. # # Authors: -# - Vincent Garonne, <[email protected]>, 2012-2018 +# - Vincent Garonne, <[email protected]>, 2012-2019 # - Wen Guan, <[email protected]>, 2014 # - Hannes Hansen, <[email protected]>, 2018 @@ -23,6 +23,7 @@ import argparse import signal +import sys from rucio.daemons.reaper.reaper import run, stop @@ -72,11 +73,19 @@ return parser -def main(): +def main(argv=None): + """ + The main reaper method called by the command. + :param argv: Command-line arguments. Default to sys.argv if not set. + """ signal.signal(signal.SIGTERM, stop) + + if argv is None: + argv = sys.argv[1:] + parser = get_parser() - args = parser.parse_args() + args = parser.parse_args(argv) try: run(total_workers=args.total_workers, chunk_size=args.chunk_size, greedy=args.greedy, once=args.run_once, scheme=args.scheme, rses=args.rses, threads_per_worker=args.threads_per_worker,
{"golden_diff": "diff --git a/lib/rucio/clis/daemons/reaper/reaper.py b/lib/rucio/clis/daemons/reaper/reaper.py\n--- a/lib/rucio/clis/daemons/reaper/reaper.py\n+++ b/lib/rucio/clis/daemons/reaper/reaper.py\n@@ -1,4 +1,4 @@\n-# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.\n+# Copyright 2012-2019 CERN for the benefit of the ATLAS collaboration.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -13,7 +13,7 @@\n # limitations under the License.\n #\n # Authors:\n-# - Vincent Garonne, <[email protected]>, 2012-2018\n+# - Vincent Garonne, <[email protected]>, 2012-2019\n # - Wen Guan, <[email protected]>, 2014\n # - Hannes Hansen, <[email protected]>, 2018\n \n@@ -23,6 +23,7 @@\n \n import argparse\n import signal\n+import sys\n \n from rucio.daemons.reaper.reaper import run, stop\n \n@@ -72,11 +73,19 @@\n return parser\n \n \n-def main():\n+def main(argv=None):\n+ \"\"\"\n+ The main reaper method called by the command.\n \n+ :param argv: Command-line arguments. Default to sys.argv if not set.\n+ \"\"\"\n signal.signal(signal.SIGTERM, stop)\n+\n+ if argv is None:\n+ argv = sys.argv[1:]\n+\n parser = get_parser()\n- args = parser.parse_args()\n+ args = parser.parse_args(argv)\n try:\n run(total_workers=args.total_workers, chunk_size=args.chunk_size, greedy=args.greedy,\n once=args.run_once, scheme=args.scheme, rses=args.rses, threads_per_worker=args.threads_per_worker,\n", "issue": "Test reaper console script\nMotivation\r\n----------\r\n\r\nThe reaper console script `rucio-reaper` is not tested in the testsuite.\r\n\r\nModification\r\n------------\r\n- Add test for the reaper console script.\r\n- Install the environnement with `python setup.py develop` in the docker env to have the generated console scripts available in the docker.\r\n- Extend the reaper argparse method and the reaper tests to validate the argparse main method and console script.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne, <[email protected]>, 2012-2018\n# - Wen Guan, <[email protected]>, 2014\n# - Hannes Hansen, <[email protected]>, 2018\n\n\"\"\"\nReaper is a daemon to manage file deletion\n\"\"\"\n\nimport argparse\nimport signal\n\nfrom rucio.daemons.reaper.reaper import run, stop\n\n\ndef get_parser():\n \"\"\"\n Returns the argparse parser.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"The Reaper daemon is responsible for replica deletion. 
It deletes them by checking if there are replicas that are not locked and have a tombstone to indicate that they can be deleted.\", epilog='''\nUpload a file and prepare the rules and replicas for deletion by using the judge-cleaner daemon::\n\n $ rucio upload --rse MOCK --scope mock --name file filename.txt\n $ rucio add-rule mock:file 1 MOCK2 --lifetime 1\n $ rucio-judge-cleaner --run-once\n\nCheck if the replica was created::\n\n $ rucio list-file-replica mock:file\n +---------+--------+------------+-----------+---------------------------------------------------------+\n | SCOPE | NAME | FILESIZE | ADLER32 | RSE: REPLICA |\n |---------+--------+------------+-----------+---------------------------------------------------------|\n | mock | file | 1.542 kB | 1268ee71 | MOCK: file://localhost:0/tmp/rucio_rse/mock/15/58/file |\n +---------+--------+------------+-----------+---------------------------------------------------------+\n\nRun the daemon::\n\n $ rucio-reaper --run-once\n\nCheck if the replica exists::\n\n $ rucio list-file-replica mock:file\n +---------+--------+------------+-----------+---------------------------------------------------------+\n | SCOPE | NAME | FILESIZE | ADLER32 | RSE: REPLICA |\n |---------+--------+------------+-----------+---------------------------------------------------------|\n +---------+--------+------------+-----------+---------------------------------------------------------+\n ''')\n parser.add_argument(\"--run-once\", action=\"store_true\", default=False, help='One iteration only')\n parser.add_argument(\"--total-workers\", action=\"store\", default=1, type=int, help='Total number of workers per process')\n parser.add_argument(\"--threads-per-worker\", action=\"store\", default=None, type=int, help='Total number of threads created by each worker')\n parser.add_argument(\"--chunk-size\", action=\"store\", default=10, type=int, help='Chunk size')\n parser.add_argument(\"--scheme\", action=\"store\", default=None, type=str, help='Force the reaper to use a particular protocol, e.g., mock.')\n parser.add_argument('--greedy', action='store_true', default=False, help='Greedy mode')\n parser.add_argument('--exclude-rses', action=\"store\", default=None, type=str, help='RSEs expression to exclude RSEs')\n parser.add_argument('--include-rses', action=\"store\", default=None, type=str, help='RSEs expression to include RSEs')\n parser.add_argument('--rses', nargs='+', type=str, help='List of RSEs')\n parser.add_argument('--delay-seconds', action=\"store\", default=3600, type=int, help='Delay to retry failed deletion')\n return parser\n\n\ndef main():\n\n signal.signal(signal.SIGTERM, stop)\n parser = get_parser()\n args = parser.parse_args()\n try:\n run(total_workers=args.total_workers, chunk_size=args.chunk_size, greedy=args.greedy,\n once=args.run_once, scheme=args.scheme, rses=args.rses, threads_per_worker=args.threads_per_worker,\n exclude_rses=args.exclude_rses, include_rses=args.include_rses, delay_seconds=args.delay_seconds)\n except KeyboardInterrupt:\n stop()\n", "path": "lib/rucio/clis/daemons/reaper/reaper.py"}]}
1,832
477
gh_patches_debug_57588
rasdani/github-patches
git_diff
joke2k__faker-1043
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BBAN for en_GB too short * Faker version: v2.0.3 * OS: linux Numeric part of the en_GB BBAN needs to be 14 digits long, it currently only returns 13, failing further validation. ### Steps to reproduce Invoke `fake.iban()` or `fake.bban()` with the en_GB locale, an IBAN or BBAN with 1 digit missing is returned. ### Expected behavior GB ibans should be 22 chars long: https://www.xe.com/ibancalculator/sample/?ibancountry=united kingdom </issue> <code> [start of faker/providers/bank/en_GB/__init__.py] 1 from .. import Provider as BankProvider 2 3 4 class Provider(BankProvider): 5 bban_format = '????#############' 6 country_code = 'GB' 7 [end of faker/providers/bank/en_GB/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/bank/en_GB/__init__.py b/faker/providers/bank/en_GB/__init__.py --- a/faker/providers/bank/en_GB/__init__.py +++ b/faker/providers/bank/en_GB/__init__.py @@ -2,5 +2,5 @@ class Provider(BankProvider): - bban_format = '????#############' + bban_format = '????##############' country_code = 'GB'
{"golden_diff": "diff --git a/faker/providers/bank/en_GB/__init__.py b/faker/providers/bank/en_GB/__init__.py\n--- a/faker/providers/bank/en_GB/__init__.py\n+++ b/faker/providers/bank/en_GB/__init__.py\n@@ -2,5 +2,5 @@\n \n \n class Provider(BankProvider):\n- bban_format = '????#############'\n+ bban_format = '????##############'\n country_code = 'GB'\n", "issue": "BBAN for en_GB too short\n* Faker version: v2.0.3\r\n* OS: linux\r\n\r\nNumeric part of the en_GB BBAN needs to be 14 digits long, it currently only returns 13, failing further validation.\r\n\r\n### Steps to reproduce\r\n\r\nInvoke `fake.iban()` or `fake.bban()` with the en_GB locale, an IBAN or BBAN with 1 digit missing is returned.\r\n\r\n### Expected behavior\r\n\r\nGB ibans should be 22 chars long: https://www.xe.com/ibancalculator/sample/?ibancountry=united kingdom\r\n\r\n\n", "before_files": [{"content": "from .. import Provider as BankProvider\n\n\nclass Provider(BankProvider):\n bban_format = '????#############'\n country_code = 'GB'\n", "path": "faker/providers/bank/en_GB/__init__.py"}]}
714
103
gh_patches_debug_60487
rasdani/github-patches
git_diff
mars-project__mars-284
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Fuse operand's sparse value is wrong <!-- Thank you for your contribution! Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue. --> **Describe the bug** A fuse operand's sparseness should be the same as tail node's, it is not set correctly now. **To Reproduce** ``` Python In [1]: import scipy.sparse as sps In [2]: import mars.tensor as mt In [3]: data = sps.rand(10, 10, density=0.05) In [4]: a = mt.tensor(data, chunk_size=3) In [5]: b = (a * 2) * 2 In [6]: g = b.build_graph(tiled=True, compose=True) In [7]: list(g)[0].op.sparse Out[7]: False In [8]: list(g)[0].op Out[8]: <mars.tensor.expressions.fuse.core.TensorFuseChunk at 0xa208b7048> In [9]: list(g)[0].composed[-1].op.sparse Out[9]: True ``` </issue> <code> [start of mars/tensor/expressions/fuse/core.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # Copyright 1999-2018 Alibaba Group Holding Ltd. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from .... import operands 18 from ....tiles import NotSupportTile 19 from ..core import TensorOperandMixin 20 21 22 class TensorFuseChunk(operands.Fuse, TensorOperandMixin): 23 def __init__(self, dtype=None, **kw): 24 super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw) 25 26 def calc_shape(self, *inputs_shape): 27 in_shapes = inputs_shape 28 out_shape = None 29 30 # TODO: the logic will be changed when fusion is not only straight line 31 for c in self.outputs[0].composed: 32 out_shape = c.op.calc_shape(*in_shapes) 33 in_shapes = [out_shape] 34 return out_shape 35 36 @classmethod 37 def tile(cls, op): 38 raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile') 39 40 41 class TensorFuseChunkMixin(TensorOperandMixin): 42 __slots__ = () 43 44 @classmethod 45 def tile(cls, op): 46 raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile') 47 48 def __call__(self, fuse_chunks): 49 head_chunk = fuse_chunks[0] 50 tail_chunk = fuse_chunks[-1] 51 setattr(self, '_operands', [c.op for c in fuse_chunks]) 52 return self.new_chunk(head_chunk.inputs, tail_chunk.shape, 53 _composed=fuse_chunks, _key=tail_chunk.key) 54 [end of mars/tensor/expressions/fuse/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mars/tensor/expressions/fuse/core.py b/mars/tensor/expressions/fuse/core.py --- a/mars/tensor/expressions/fuse/core.py +++ b/mars/tensor/expressions/fuse/core.py @@ -20,8 +20,8 @@ class TensorFuseChunk(operands.Fuse, TensorOperandMixin): - def __init__(self, dtype=None, **kw): - super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw) + def __init__(self, dtype=None, sparse=False, **kw): + super(TensorFuseChunk, self).__init__(_dtype=dtype, _sparse=sparse, **kw) def calc_shape(self, *inputs_shape): in_shapes = inputs_shape
{"golden_diff": "diff --git a/mars/tensor/expressions/fuse/core.py b/mars/tensor/expressions/fuse/core.py\n--- a/mars/tensor/expressions/fuse/core.py\n+++ b/mars/tensor/expressions/fuse/core.py\n@@ -20,8 +20,8 @@\n \n \n class TensorFuseChunk(operands.Fuse, TensorOperandMixin):\n- def __init__(self, dtype=None, **kw):\n- super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)\n+ def __init__(self, dtype=None, sparse=False, **kw):\n+ super(TensorFuseChunk, self).__init__(_dtype=dtype, _sparse=sparse, **kw)\n \n def calc_shape(self, *inputs_shape):\n in_shapes = inputs_shape\n", "issue": "[BUG] Fuse operand's sparse value is wrong\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA fuse operand's sparseness should be the same as tail node's, it is not set correctly now.\r\n\r\n**To Reproduce**\r\n``` Python\r\nIn [1]: import scipy.sparse as sps \r\n\r\nIn [2]: import mars.tensor as mt \r\n\r\nIn [3]: data = sps.rand(10, 10, density=0.05) \r\n\r\nIn [4]: a = mt.tensor(data, chunk_size=3) \r\n\r\nIn [5]: b = (a * 2) * 2 \r\n\r\nIn [6]: g = b.build_graph(tiled=True, compose=True) \r\n\r\nIn [7]: list(g)[0].op.sparse \r\nOut[7]: False\r\n\r\nIn [8]: list(g)[0].op \r\nOut[8]: <mars.tensor.expressions.fuse.core.TensorFuseChunk at 0xa208b7048>\r\n\r\nIn [9]: list(g)[0].composed[-1].op.sparse \r\nOut[9]: True\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .... import operands\nfrom ....tiles import NotSupportTile\nfrom ..core import TensorOperandMixin\n\n\nclass TensorFuseChunk(operands.Fuse, TensorOperandMixin):\n def __init__(self, dtype=None, **kw):\n super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)\n\n def calc_shape(self, *inputs_shape):\n in_shapes = inputs_shape\n out_shape = None\n\n # TODO: the logic will be changed when fusion is not only straight line\n for c in self.outputs[0].composed:\n out_shape = c.op.calc_shape(*in_shapes)\n in_shapes = [out_shape]\n return out_shape\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n\nclass TensorFuseChunkMixin(TensorOperandMixin):\n __slots__ = ()\n\n @classmethod\n def tile(cls, op):\n raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')\n\n def __call__(self, fuse_chunks):\n head_chunk = fuse_chunks[0]\n tail_chunk = fuse_chunks[-1]\n setattr(self, '_operands', [c.op for c in fuse_chunks])\n return self.new_chunk(head_chunk.inputs, tail_chunk.shape,\n _composed=fuse_chunks, _key=tail_chunk.key)\n", "path": "mars/tensor/expressions/fuse/core.py"}]}
1,374
177
gh_patches_debug_16846
rasdani/github-patches
git_diff
electricitymaps__electricitymaps-contrib-1691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Import intensity could fallback on yearly averages when missing/unknown When a country, or area, is importing electricity from another country and the exporting country's production sources are unknown, it seems as if the intensity of the imported electricity is set to be equal to the intensity of the importing country. But this is hardly meaningful. Would it be possible to set the unknown intensity of imported electricity to an average or mean value from a historical period? E.g. the last month or the same month last year. Or to the last available dataset (depending on how old that is). I can see that it happens quite often for Norway, that "Data [is] temporarily unavailable". The intensity of the electricity exported to Sweden is low, while it is medium high when exported to West Denmark. </issue> <code> [start of utils/config.py] 1 import json 2 import os 3 4 def relative_path(script_reference_path, rel_path): 5 # __file__ should be passed as script_reference_path 6 script_path = os.path.abspath( 7 script_reference_path) # i.e. /path/to/dir/foobar.py 8 script_dir = os.path.split(script_path)[0] # i.e. /path/to/dir/ 9 return os.path.join(script_dir, rel_path) 10 11 12 # Prepare zone bounding boxes 13 ZONE_BOUNDING_BOXES = {} 14 15 # Read parser import list from config jsons 16 ZONES_CONFIG = json.load(open(relative_path( 17 __file__, '../config/zones.json'))) 18 19 # Read all zones 20 for zone_id, zone_config in ZONES_CONFIG.items(): 21 if 'bounding_box' in zone_config: 22 ZONE_BOUNDING_BOXES[zone_id] = zone_config['bounding_box'] 23 24 # Read parser import list from config jsons 25 ZONES_CONFIG = json.load(open(relative_path( 26 __file__, '../config/zones.json'))) 27 EXCHANGES_CONFIG = json.load(open(relative_path( 28 __file__, '../config/exchanges.json'))) 29 ZONE_NEIGHBOURS = {} 30 for k, v in EXCHANGES_CONFIG.items(): 31 zone_names = k.split('->') 32 pairs = [ 33 (zone_names[0], zone_names[1]), 34 (zone_names[1], zone_names[0]) 35 ] 36 for zone_name_1, zone_name_2 in pairs: 37 if zone_name_1 not in ZONE_NEIGHBOURS: 38 ZONE_NEIGHBOURS[zone_name_1] = set() 39 ZONE_NEIGHBOURS[zone_name_1].add(zone_name_2) 40 # we want neighbors to always be in the same order 41 for zone, neighbors in ZONE_NEIGHBOURS.items(): 42 ZONE_NEIGHBOURS[zone] = sorted(neighbors) 43 [end of utils/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/utils/config.py b/utils/config.py --- a/utils/config.py +++ b/utils/config.py @@ -40,3 +40,22 @@ # we want neighbors to always be in the same order for zone, neighbors in ZONE_NEIGHBOURS.items(): ZONE_NEIGHBOURS[zone] = sorted(neighbors) + +CO2EQ_PARAMETERS = json.load(open(relative_path( + __file__, '../config/co2eq_parameters.json'))) + +def emission_factors(zone_key): + fallback_carbon_intensity = CO2EQ_PARAMETERS['fallbackZoneMixes'].get(zone_key, {}).get('carbonIntensity'); + override = CO2EQ_PARAMETERS['emissionFactors']['zoneOverrides'].get(zone_key, {}) + defaults = CO2EQ_PARAMETERS['emissionFactors']['defaults'] + merged = {**defaults, **override} + if fallback_carbon_intensity: + merged['battery storage'] = { + 'value': fallback_carbon_intensity, + 'source': 'Annual carbon intensity' + } + merged['hydro storage'] = { + 'value': fallback_carbon_intensity, + 'source': 'Annual carbon intensity' + } + return dict([(k, (v or {}).get('value')) for (k, v) in merged.items()])
{"golden_diff": "diff --git a/utils/config.py b/utils/config.py\n--- a/utils/config.py\n+++ b/utils/config.py\n@@ -40,3 +40,22 @@\n # we want neighbors to always be in the same order\n for zone, neighbors in ZONE_NEIGHBOURS.items():\n ZONE_NEIGHBOURS[zone] = sorted(neighbors)\n+\n+CO2EQ_PARAMETERS = json.load(open(relative_path(\n+ __file__, '../config/co2eq_parameters.json')))\n+\n+def emission_factors(zone_key):\n+ fallback_carbon_intensity = CO2EQ_PARAMETERS['fallbackZoneMixes'].get(zone_key, {}).get('carbonIntensity');\n+ override = CO2EQ_PARAMETERS['emissionFactors']['zoneOverrides'].get(zone_key, {})\n+ defaults = CO2EQ_PARAMETERS['emissionFactors']['defaults']\n+ merged = {**defaults, **override}\n+ if fallback_carbon_intensity:\n+ merged['battery storage'] = {\n+ 'value': fallback_carbon_intensity,\n+ 'source': 'Annual carbon intensity'\n+ }\n+ merged['hydro storage'] = {\n+ 'value': fallback_carbon_intensity,\n+ 'source': 'Annual carbon intensity'\n+ }\n+ return dict([(k, (v or {}).get('value')) for (k, v) in merged.items()])\n", "issue": "Import intensity could fallback on yearly averages when missing/unknown\nWhen a country, or area, is importing electricity from another country and the exporting country's production sources are unknown, it seems as if the intensity of the imported electricity is set to be equal to the intensity of the importing country. But this is hardly meaningful. Would it be possible to set the unknown intensity of imported electricity to an average or mean value from a historical period? E.g. the last month or the same month last year. Or to the last available dataset (depending on how old that is).\r\n\r\nI can see that it happens quite often for Norway, that \"Data [is] temporarily unavailable\". The intensity of the electricity exported to Sweden is low, while it is medium high when exported to West Denmark.\n", "before_files": [{"content": "import json\nimport os\n\ndef relative_path(script_reference_path, rel_path):\n # __file__ should be passed as script_reference_path\n script_path = os.path.abspath(\n script_reference_path) # i.e. /path/to/dir/foobar.py\n script_dir = os.path.split(script_path)[0] # i.e. /path/to/dir/\n return os.path.join(script_dir, rel_path)\n\n\n# Prepare zone bounding boxes\nZONE_BOUNDING_BOXES = {}\n\n# Read parser import list from config jsons\nZONES_CONFIG = json.load(open(relative_path(\n __file__, '../config/zones.json')))\n\n# Read all zones\nfor zone_id, zone_config in ZONES_CONFIG.items():\n if 'bounding_box' in zone_config:\n ZONE_BOUNDING_BOXES[zone_id] = zone_config['bounding_box']\n\n# Read parser import list from config jsons\nZONES_CONFIG = json.load(open(relative_path(\n __file__, '../config/zones.json')))\nEXCHANGES_CONFIG = json.load(open(relative_path(\n __file__, '../config/exchanges.json')))\nZONE_NEIGHBOURS = {}\nfor k, v in EXCHANGES_CONFIG.items():\n zone_names = k.split('->')\n pairs = [\n (zone_names[0], zone_names[1]),\n (zone_names[1], zone_names[0])\n ]\n for zone_name_1, zone_name_2 in pairs:\n if zone_name_1 not in ZONE_NEIGHBOURS:\n ZONE_NEIGHBOURS[zone_name_1] = set()\n ZONE_NEIGHBOURS[zone_name_1].add(zone_name_2)\n# we want neighbors to always be in the same order\nfor zone, neighbors in ZONE_NEIGHBOURS.items():\n ZONE_NEIGHBOURS[zone] = sorted(neighbors)\n", "path": "utils/config.py"}]}
1,168
285
gh_patches_debug_34020
rasdani/github-patches
git_diff
pre-commit__pre-commit-1888
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Path is not mounted correctly when running Docker hooks from Docker **Situation**: - In our CI we want to run `pre-commit` inside Docker. - Some of our hooks are `docker_image` **Problem** This line mostly https://github.com/pre-commit/pre-commit/blob/528c7afd18dafa6e47ce73add2c8e1550d105674/pre_commit/languages/docker.py#L94 Currently `pre-commit` mounts the current directory to `/src` and uses current directory name as mount base. However this does not work when `pre-commit` is run inside the container on some mounted path already, because mount points are relative to the host, not to the container. Example: ``` /opt/my_code <- host, mounts /opt/my_code:/project /project <- in Docker running pre-commit, pre-commit is doing mount /project:/src /src <- (in Dockerized hook) ``` Currently pre-commit will try to mount it as `-v /project:/src,rw,Z`. Expected - to mount it as `-v /opt/my_code:/src` **Possible solution**: When I replaced `os.getcwd()` from the code above to `translate_path(os.getcwd())` where `translate_path` is taken from https://gist.github.com/dpfoose/f96d4e4b76c2e01265619d545b77987a, it worked perfectly. It does add extra `docker` pip-dependency though. **See also**: https://forums.docker.com/t/mounting-a-volume-not-working-with-running-docker-in-docker/25775/2 </issue> <code> [start of pre_commit/languages/docker.py] 1 import hashlib 2 import os 3 from typing import Sequence 4 from typing import Tuple 5 6 import pre_commit.constants as C 7 from pre_commit.hook import Hook 8 from pre_commit.languages import helpers 9 from pre_commit.prefix import Prefix 10 from pre_commit.util import clean_path_on_failure 11 12 ENVIRONMENT_DIR = 'docker' 13 PRE_COMMIT_LABEL = 'PRE_COMMIT' 14 get_default_version = helpers.basic_get_default_version 15 healthy = helpers.basic_healthy 16 17 18 def md5(s: str) -> str: # pragma: win32 no cover 19 return hashlib.md5(s.encode()).hexdigest() 20 21 22 def docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover 23 md5sum = md5(os.path.basename(prefix.prefix_dir)).lower() 24 return f'pre-commit-{md5sum}' 25 26 27 def build_docker_image( 28 prefix: Prefix, 29 *, 30 pull: bool, 31 ) -> None: # pragma: win32 no cover 32 cmd: Tuple[str, ...] = ( 33 'docker', 'build', 34 '--tag', docker_tag(prefix), 35 '--label', PRE_COMMIT_LABEL, 36 ) 37 if pull: 38 cmd += ('--pull',) 39 # This must come last for old versions of docker. 
See #477 40 cmd += ('.',) 41 helpers.run_setup_cmd(prefix, cmd) 42 43 44 def install_environment( 45 prefix: Prefix, version: str, additional_dependencies: Sequence[str], 46 ) -> None: # pragma: win32 no cover 47 helpers.assert_version_default('docker', version) 48 helpers.assert_no_additional_deps('docker', additional_dependencies) 49 50 directory = prefix.path( 51 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT), 52 ) 53 54 # Docker doesn't really have relevant disk environment, but pre-commit 55 # still needs to cleanup its state files on failure 56 with clean_path_on_failure(directory): 57 build_docker_image(prefix, pull=True) 58 os.mkdir(directory) 59 60 61 def get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover 62 try: 63 return ('-u', f'{os.getuid()}:{os.getgid()}') 64 except AttributeError: 65 return () 66 67 68 def docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover 69 return ( 70 'docker', 'run', 71 '--rm', 72 *get_docker_user(), 73 # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from 74 # The `Z` option tells Docker to label the content with a private 75 # unshared label. Only the current container can use a private volume. 76 '-v', f'{os.getcwd()}:/src:rw,Z', 77 '--workdir', '/src', 78 ) 79 80 81 def run_hook( 82 hook: Hook, 83 file_args: Sequence[str], 84 color: bool, 85 ) -> Tuple[int, bytes]: # pragma: win32 no cover 86 # Rebuild the docker image in case it has gone missing, as many people do 87 # automated cleanup of docker images. 88 build_docker_image(hook.prefix, pull=False) 89 90 entry_exe, *cmd_rest = hook.cmd 91 92 entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix)) 93 cmd = (*docker_cmd(), *entry_tag, *cmd_rest) 94 return helpers.run_xargs(hook, cmd, file_args, color=color) 95 [end of pre_commit/languages/docker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py --- a/pre_commit/languages/docker.py +++ b/pre_commit/languages/docker.py @@ -1,5 +1,7 @@ import hashlib +import json import os +import socket from typing import Sequence from typing import Tuple @@ -8,6 +10,7 @@ from pre_commit.languages import helpers from pre_commit.prefix import Prefix from pre_commit.util import clean_path_on_failure +from pre_commit.util import cmd_output_b ENVIRONMENT_DIR = 'docker' PRE_COMMIT_LABEL = 'PRE_COMMIT' @@ -15,6 +18,34 @@ healthy = helpers.basic_healthy +def _is_in_docker() -> bool: + try: + with open('/proc/1/cgroup', 'rb') as f: + return b'docker' in f.read() + except FileNotFoundError: + return False + + +def _get_docker_path(path: str) -> str: + if not _is_in_docker(): + return path + hostname = socket.gethostname() + + _, out, _ = cmd_output_b('docker', 'inspect', hostname) + + container, = json.loads(out) + for mount in container['Mounts']: + src_path = mount['Source'] + to_path = mount['Destination'] + if os.path.commonpath((path, to_path)) == to_path: + # So there is something in common, + # and we can proceed remapping it + return path.replace(to_path, src_path) + # we're in Docker, but the path is not mounted, cannot really do anything, + # so fall back to original path + return path + + def md5(s: str) -> str: # pragma: win32 no cover return hashlib.md5(s.encode()).hexdigest() @@ -73,7 +104,7 @@ # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from # The `Z` option tells Docker to label the content with a private # unshared label. Only the current container can use a private volume. - '-v', f'{os.getcwd()}:/src:rw,Z', + '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z', '--workdir', '/src', )
{"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -1,5 +1,7 @@\n import hashlib\n+import json\n import os\n+import socket\n from typing import Sequence\n from typing import Tuple\n \n@@ -8,6 +10,7 @@\n from pre_commit.languages import helpers\n from pre_commit.prefix import Prefix\n from pre_commit.util import clean_path_on_failure\n+from pre_commit.util import cmd_output_b\n \n ENVIRONMENT_DIR = 'docker'\n PRE_COMMIT_LABEL = 'PRE_COMMIT'\n@@ -15,6 +18,34 @@\n healthy = helpers.basic_healthy\n \n \n+def _is_in_docker() -> bool:\n+ try:\n+ with open('/proc/1/cgroup', 'rb') as f:\n+ return b'docker' in f.read()\n+ except FileNotFoundError:\n+ return False\n+\n+\n+def _get_docker_path(path: str) -> str:\n+ if not _is_in_docker():\n+ return path\n+ hostname = socket.gethostname()\n+\n+ _, out, _ = cmd_output_b('docker', 'inspect', hostname)\n+\n+ container, = json.loads(out)\n+ for mount in container['Mounts']:\n+ src_path = mount['Source']\n+ to_path = mount['Destination']\n+ if os.path.commonpath((path, to_path)) == to_path:\n+ # So there is something in common,\n+ # and we can proceed remapping it\n+ return path.replace(to_path, src_path)\n+ # we're in Docker, but the path is not mounted, cannot really do anything,\n+ # so fall back to original path\n+ return path\n+\n+\n def md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n \n@@ -73,7 +104,7 @@\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n- '-v', f'{os.getcwd()}:/src:rw,Z',\n+ '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',\n '--workdir', '/src',\n )\n", "issue": "Path is not mounted correctly when running Docker hooks from Docker\n**Situation**:\r\n\r\n- In our CI we want to run `pre-commit` inside Docker.\r\n- Some of our hooks are `docker_image`\r\n\r\n**Problem**\r\nThis line mostly https://github.com/pre-commit/pre-commit/blob/528c7afd18dafa6e47ce73add2c8e1550d105674/pre_commit/languages/docker.py#L94\r\n\r\nCurrently `pre-commit` mounts the current directory to `/src` and uses current directory name as mount base.\r\nHowever this does not work when `pre-commit` is run inside the container on some mounted path already, because mount points are relative to the host, not to the container.\r\n\r\n Example: \r\n```\r\n/opt/my_code <- host, mounts /opt/my_code:/project\r\n/project <- in Docker running pre-commit, pre-commit is doing mount /project:/src\r\n/src <- (in Dockerized hook)\r\n```\r\n\r\nCurrently pre-commit will try to mount it as `-v /project:/src,rw,Z`. Expected - to mount it as `-v /opt/my_code:/src`\r\n\r\n**Possible solution**:\r\n\r\nWhen I replaced `os.getcwd()` from the code above to `translate_path(os.getcwd())` where `translate_path` is taken from https://gist.github.com/dpfoose/f96d4e4b76c2e01265619d545b77987a, it worked perfectly. 
It does add extra `docker` pip-dependency though.\r\n\r\n**See also**: https://forums.docker.com/t/mounting-a-volume-not-working-with-running-docker-in-docker/25775/2\n", "before_files": [{"content": "import hashlib\nimport os\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n\n\ndef docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return f'pre-commit-{md5sum}'\n\n\ndef build_docker_image(\n prefix: Prefix,\n *,\n pull: bool,\n) -> None: # pragma: win32 no cover\n cmd: Tuple[str, ...] = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup its state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover\n try:\n return ('-u', f'{os.getuid()}:{os.getgid()}')\n except AttributeError:\n return ()\n\n\ndef docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover\n return (\n 'docker', 'run',\n '--rm',\n *get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', f'{os.getcwd()}:/src:rw,Z',\n '--workdir', '/src',\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]: # pragma: win32 no cover\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n entry_exe, *cmd_rest = hook.cmd\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = (*docker_cmd(), *entry_tag, *cmd_rest)\n return helpers.run_xargs(hook, cmd, file_args, color=color)\n", "path": "pre_commit/languages/docker.py"}]}
1,818
534
gh_patches_debug_37126
rasdani/github-patches
git_diff
open-mmlab__mmdetection3d-69
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> iou3d failed when inference with gpu:1 Thanks for your error report and we appreciate it a lot. **Checklist** 1. I have searched related issues but cannot get the expected help. 2. The bug has not been fixed in the latest version. **Describe the bug** Training on single GPU, when using default gpu (gpu:0) , everything is ok. Switch to gpu:1, report `an illegal memory access was encountered mmdet3d/ops/iou3d/src/iou3d.cpp 121` during inference, however training is ok. **Reproduction** 1. What command or script did you run? ``` python tools/train.py CONFIG_PATH --gpu-ids 1 ``` 2. Did you make any modifications on the code or config? Did you understand what you have modified? 3. What dataset did you use? - kitti **Environment** 1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment infomation and paste it here. 2. You may add addition that may be helpful for locating the problem, such as - How you installed PyTorch [e.g., pip, conda, source] - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) **Error traceback** If applicable, paste the error trackback here. ``` A placeholder for trackback. ``` **Bug fix** If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated! </issue> <code> [start of mmdet3d/ops/iou3d/iou3d_utils.py] 1 import torch 2 3 from . import iou3d_cuda 4 5 6 def boxes_iou_bev(boxes_a, boxes_b): 7 """ 8 :param boxes_a: (M, 5) 9 :param boxes_b: (N, 5) 10 :return: 11 ans_iou: (M, N) 12 """ 13 14 ans_iou = torch.cuda.FloatTensor( 15 torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() 16 17 iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), 18 ans_iou) 19 20 return ans_iou 21 22 23 def nms_gpu(boxes, scores, thresh): 24 """ 25 :param boxes: (N, 5) [x1, y1, x2, y2, ry] 26 :param scores: (N) 27 :param thresh: 28 :return: 29 """ 30 # areas = (x2 - x1) * (y2 - y1) 31 order = scores.sort(0, descending=True)[1] 32 33 boxes = boxes[order].contiguous() 34 35 keep = torch.LongTensor(boxes.size(0)) 36 num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh) 37 return order[keep[:num_out].cuda()].contiguous() 38 39 40 def nms_normal_gpu(boxes, scores, thresh): 41 """ 42 :param boxes: (N, 5) [x1, y1, x2, y2, ry] 43 :param scores: (N) 44 :param thresh: 45 :return: 46 """ 47 # areas = (x2 - x1) * (y2 - y1) 48 order = scores.sort(0, descending=True)[1] 49 50 boxes = boxes[order].contiguous() 51 52 keep = torch.LongTensor(boxes.size(0)) 53 num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh) 54 return order[keep[:num_out].cuda()].contiguous() 55 [end of mmdet3d/ops/iou3d/iou3d_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmdet3d/ops/iou3d/iou3d_utils.py b/mmdet3d/ops/iou3d/iou3d_utils.py --- a/mmdet3d/ops/iou3d/iou3d_utils.py +++ b/mmdet3d/ops/iou3d/iou3d_utils.py @@ -4,15 +4,17 @@ def boxes_iou_bev(boxes_a, boxes_b): - """ - :param boxes_a: (M, 5) - :param boxes_b: (N, 5) - :return: - ans_iou: (M, N) - """ + """Calculate boxes IoU in the bird view. - ans_iou = torch.cuda.FloatTensor( - torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() + Args: + boxes_a (torch.Tensor): Input boxes a with shape (M, 5). + boxes_b (torch.Tensor): Input boxes b with shape (N, 5). + + Returns: + ans_iou (torch.Tensor): IoU result with shape (M, N). + """ + ans_iou = boxes_a.new_zeros( + torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou) @@ -21,34 +23,41 @@ def nms_gpu(boxes, scores, thresh): + """Non maximum suppression on GPU. + + Args: + boxes (torch.Tensor): Input boxes with shape (N, 5). + scores (torch.Tensor): Scores of predicted boxes with shape (N). + thresh (torch.Tensor): Threshold of non maximum suppression. + + Returns: + torch.Tensor: Remaining indices with scores in descending order. """ - :param boxes: (N, 5) [x1, y1, x2, y2, ry] - :param scores: (N) - :param thresh: - :return: - """ - # areas = (x2 - x1) * (y2 - y1) order = scores.sort(0, descending=True)[1] boxes = boxes[order].contiguous() - keep = torch.LongTensor(boxes.size(0)) - num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh) - return order[keep[:num_out].cuda()].contiguous() + keep = boxes.new_zeros(boxes.size(0)) + num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index) + return order[keep[:num_out].cuda(boxes.device)].contiguous() def nms_normal_gpu(boxes, scores, thresh): + """Normal non maximum suppression on GPU. + + Args: + boxes (torch.Tensor): Input boxes with shape (N, 5). + scores (torch.Tensor): Scores of predicted boxes with shape (N). + thresh (torch.Tensor): Threshold of non maximum suppression. + + Returns: + torch.Tensor: Remaining indices with scores in descending order. """ - :param boxes: (N, 5) [x1, y1, x2, y2, ry] - :param scores: (N) - :param thresh: - :return: - """ - # areas = (x2 - x1) * (y2 - y1) order = scores.sort(0, descending=True)[1] boxes = boxes[order].contiguous() - keep = torch.LongTensor(boxes.size(0)) - num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh) - return order[keep[:num_out].cuda()].contiguous() + keep = boxes.new_zeros(boxes.size(0)) + num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh, + boxes.device.index) + return order[keep[:num_out].cuda(boxes.device)].contiguous()
{"golden_diff": "diff --git a/mmdet3d/ops/iou3d/iou3d_utils.py b/mmdet3d/ops/iou3d/iou3d_utils.py\n--- a/mmdet3d/ops/iou3d/iou3d_utils.py\n+++ b/mmdet3d/ops/iou3d/iou3d_utils.py\n@@ -4,15 +4,17 @@\n \n \n def boxes_iou_bev(boxes_a, boxes_b):\n- \"\"\"\n- :param boxes_a: (M, 5)\n- :param boxes_b: (N, 5)\n- :return:\n- ans_iou: (M, N)\n- \"\"\"\n+ \"\"\"Calculate boxes IoU in the bird view.\n \n- ans_iou = torch.cuda.FloatTensor(\n- torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()\n+ Args:\n+ boxes_a (torch.Tensor): Input boxes a with shape (M, 5).\n+ boxes_b (torch.Tensor): Input boxes b with shape (N, 5).\n+\n+ Returns:\n+ ans_iou (torch.Tensor): IoU result with shape (M, N).\n+ \"\"\"\n+ ans_iou = boxes_a.new_zeros(\n+ torch.Size((boxes_a.shape[0], boxes_b.shape[0])))\n \n iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(),\n ans_iou)\n@@ -21,34 +23,41 @@\n \n \n def nms_gpu(boxes, scores, thresh):\n+ \"\"\"Non maximum suppression on GPU.\n+\n+ Args:\n+ boxes (torch.Tensor): Input boxes with shape (N, 5).\n+ scores (torch.Tensor): Scores of predicted boxes with shape (N).\n+ thresh (torch.Tensor): Threshold of non maximum suppression.\n+\n+ Returns:\n+ torch.Tensor: Remaining indices with scores in descending order.\n \"\"\"\n- :param boxes: (N, 5) [x1, y1, x2, y2, ry]\n- :param scores: (N)\n- :param thresh:\n- :return:\n- \"\"\"\n- # areas = (x2 - x1) * (y2 - y1)\n order = scores.sort(0, descending=True)[1]\n \n boxes = boxes[order].contiguous()\n \n- keep = torch.LongTensor(boxes.size(0))\n- num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh)\n- return order[keep[:num_out].cuda()].contiguous()\n+ keep = boxes.new_zeros(boxes.size(0))\n+ num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index)\n+ return order[keep[:num_out].cuda(boxes.device)].contiguous()\n \n \n def nms_normal_gpu(boxes, scores, thresh):\n+ \"\"\"Normal non maximum suppression on GPU.\n+\n+ Args:\n+ boxes (torch.Tensor): Input boxes with shape (N, 5).\n+ scores (torch.Tensor): Scores of predicted boxes with shape (N).\n+ thresh (torch.Tensor): Threshold of non maximum suppression.\n+\n+ Returns:\n+ torch.Tensor: Remaining indices with scores in descending order.\n \"\"\"\n- :param boxes: (N, 5) [x1, y1, x2, y2, ry]\n- :param scores: (N)\n- :param thresh:\n- :return:\n- \"\"\"\n- # areas = (x2 - x1) * (y2 - y1)\n order = scores.sort(0, descending=True)[1]\n \n boxes = boxes[order].contiguous()\n \n- keep = torch.LongTensor(boxes.size(0))\n- num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh)\n- return order[keep[:num_out].cuda()].contiguous()\n+ keep = boxes.new_zeros(boxes.size(0))\n+ num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh,\n+ boxes.device.index)\n+ return order[keep[:num_out].cuda(boxes.device)].contiguous()\n", "issue": "iou3d failed when inference with gpu:1\nThanks for your error report and we appreciate it a lot.\r\n\r\n**Checklist**\r\n1. I have searched related issues but cannot get the expected help.\r\n2. The bug has not been fixed in the latest version.\r\n\r\n**Describe the bug**\r\nTraining on single GPU, when using default gpu (gpu:0) , everything is ok. \r\nSwitch to gpu:1, report `an illegal memory access was encountered mmdet3d/ops/iou3d/src/iou3d.cpp 121` during inference, however training is ok.\r\n\r\n**Reproduction**\r\n1. What command or script did you run?\r\n```\r\npython tools/train.py CONFIG_PATH --gpu-ids 1\r\n```\r\n2. Did you make any modifications on the code or config? Did you understand what you have modified?\r\n3. 
What dataset did you use?\r\n- kitti\r\n\r\n**Environment**\r\n\r\n1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment infomation and paste it here.\r\n2. You may add addition that may be helpful for locating the problem, such as\r\n - How you installed PyTorch [e.g., pip, conda, source]\r\n - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\r\n\r\n**Error traceback**\r\nIf applicable, paste the error trackback here.\r\n```\r\nA placeholder for trackback.\r\n```\r\n\r\n**Bug fix**\r\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\r\n\n", "before_files": [{"content": "import torch\n\nfrom . import iou3d_cuda\n\n\ndef boxes_iou_bev(boxes_a, boxes_b):\n \"\"\"\n :param boxes_a: (M, 5)\n :param boxes_b: (N, 5)\n :return:\n ans_iou: (M, N)\n \"\"\"\n\n ans_iou = torch.cuda.FloatTensor(\n torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()\n\n iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(),\n ans_iou)\n\n return ans_iou\n\n\ndef nms_gpu(boxes, scores, thresh):\n \"\"\"\n :param boxes: (N, 5) [x1, y1, x2, y2, ry]\n :param scores: (N)\n :param thresh:\n :return:\n \"\"\"\n # areas = (x2 - x1) * (y2 - y1)\n order = scores.sort(0, descending=True)[1]\n\n boxes = boxes[order].contiguous()\n\n keep = torch.LongTensor(boxes.size(0))\n num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh)\n return order[keep[:num_out].cuda()].contiguous()\n\n\ndef nms_normal_gpu(boxes, scores, thresh):\n \"\"\"\n :param boxes: (N, 5) [x1, y1, x2, y2, ry]\n :param scores: (N)\n :param thresh:\n :return:\n \"\"\"\n # areas = (x2 - x1) * (y2 - y1)\n order = scores.sort(0, descending=True)[1]\n\n boxes = boxes[order].contiguous()\n\n keep = torch.LongTensor(boxes.size(0))\n num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh)\n return order[keep[:num_out].cuda()].contiguous()\n", "path": "mmdet3d/ops/iou3d/iou3d_utils.py"}]}
1,451
925
gh_patches_debug_5988
rasdani/github-patches
git_diff
liqd__a4-meinberlin-4916
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> #6963 Too many codes in 1 package URL: https://meinberlin-dev.liqd.net/dashboard/modules/burgerinnenhaushalt-3-phasen-21/download-codes/ user: admin, initiator expected behaviour: Each code-package should contain a max. of 1.000.000 codes. ~~The wording of the helptext should have also the right number of 1.000.000 codes per package as each package should contain a maximum of 1.000.000 codes per excel-file.~~ behaviour: ~~the number in the wording of the helptext is "10.000.000" and~~ the packages can contain more than 1.000.000 codes. important screensize: - device & browser: mac ff Comment/Question: I tried it with generating two mill codes and the codes were put in only one code-package. I also couldn't download the package probably because it was too big. Linked: https://github.com/liqd/a4-meinberlin/issues/4907 </issue> <code> [start of meinberlin/apps/votes/tasks.py] 1 from background_task import background 2 3 from adhocracy4.modules.models import Module 4 from meinberlin.apps.votes.models import VotingToken 5 from meinberlin.apps.votes.models import get_token_12 6 7 # Number of tokens to insert into database per bulk_create 8 BATCH_SIZE = 1000000 9 # Max number of tokens in one download / package 10 PACKAGE_SIZE = 10000000 11 12 13 def generate_voting_tokens(module_id, number_of_tokens, existing_tokens): 14 module = Module.objects.get(pk=module_id) 15 package_number = VotingToken.next_package_number(module) 16 module_name = module.name 17 project_id = module.project.id 18 project_name = module.project.name 19 20 number_to_generate = number_of_tokens 21 package_number_limit = 0 22 if number_of_tokens > PACKAGE_SIZE: 23 package_number_limit = number_of_tokens - PACKAGE_SIZE 24 while number_to_generate > 0: 25 if number_to_generate >= BATCH_SIZE: 26 generate_voting_tokens_batch( 27 module_id, 28 BATCH_SIZE, 29 package_number, 30 number_of_tokens, 31 module_name, 32 project_id, 33 project_name, 34 existing_tokens, 35 ) 36 number_to_generate = number_to_generate - BATCH_SIZE 37 else: 38 generate_voting_tokens_batch( 39 module_id, 40 number_to_generate, 41 package_number, 42 number_of_tokens, 43 module_name, 44 project_id, 45 project_name, 46 existing_tokens, 47 ) 48 number_to_generate = 0 49 if package_number_limit >= number_to_generate: 50 package_number += 1 51 package_number_limit - PACKAGE_SIZE 52 53 54 @background(schedule=1) 55 def generate_voting_tokens_batch( 56 module_id, 57 batch_size, 58 package_number, 59 number_of_tokens, 60 module_name, 61 project_id, 62 project_name, 63 existing_tokens, 64 ): 65 module = Module.objects.get(pk=module_id) 66 VotingToken.objects.bulk_create( 67 [get_token_and_hash(module, package_number) for i in range(batch_size)] 68 ) 69 70 71 def get_token_and_hash(module, package_number): 72 token = get_token_12() 73 token_hash = VotingToken.hash_token(token, module) 74 return VotingToken( 75 token=token, token_hash=token_hash, module=module, package_number=package_number 76 ) 77 [end of meinberlin/apps/votes/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/votes/tasks.py b/meinberlin/apps/votes/tasks.py --- a/meinberlin/apps/votes/tasks.py +++ b/meinberlin/apps/votes/tasks.py @@ -5,9 +5,9 @@ from meinberlin.apps.votes.models import get_token_12 # Number of tokens to insert into database per bulk_create -BATCH_SIZE = 1000000 +BATCH_SIZE = 100000 # Max number of tokens in one download / package -PACKAGE_SIZE = 10000000 +PACKAGE_SIZE = 1000000 def generate_voting_tokens(module_id, number_of_tokens, existing_tokens):
{"golden_diff": "diff --git a/meinberlin/apps/votes/tasks.py b/meinberlin/apps/votes/tasks.py\n--- a/meinberlin/apps/votes/tasks.py\n+++ b/meinberlin/apps/votes/tasks.py\n@@ -5,9 +5,9 @@\n from meinberlin.apps.votes.models import get_token_12\n \n # Number of tokens to insert into database per bulk_create\n-BATCH_SIZE = 1000000\n+BATCH_SIZE = 100000\n # Max number of tokens in one download / package\n-PACKAGE_SIZE = 10000000\n+PACKAGE_SIZE = 1000000\n \n \n def generate_voting_tokens(module_id, number_of_tokens, existing_tokens):\n", "issue": "#6963 Too many codes in 1 package\nURL: https://meinberlin-dev.liqd.net/dashboard/modules/burgerinnenhaushalt-3-phasen-21/download-codes/\r\nuser: admin, initiator\r\nexpected behaviour: Each code-package should contain a max. of 1.000.000 codes. ~~The wording of the helptext should have also the right number of 1.000.000 codes per package as each package should contain a maximum of 1.000.000 codes per excel-file.~~\r\nbehaviour: ~~the number in the wording of the helptext is \"10.000.000\" and~~ the packages can contain more than 1.000.000 codes.\r\nimportant screensize: -\r\ndevice & browser: mac ff\r\nComment/Question: I tried it with generating two mill codes and the codes were put in only one code-package. I also couldn't download the package probably because it was too big.\r\n\r\nLinked: https://github.com/liqd/a4-meinberlin/issues/4907\r\n\n", "before_files": [{"content": "from background_task import background\n\nfrom adhocracy4.modules.models import Module\nfrom meinberlin.apps.votes.models import VotingToken\nfrom meinberlin.apps.votes.models import get_token_12\n\n# Number of tokens to insert into database per bulk_create\nBATCH_SIZE = 1000000\n# Max number of tokens in one download / package\nPACKAGE_SIZE = 10000000\n\n\ndef generate_voting_tokens(module_id, number_of_tokens, existing_tokens):\n module = Module.objects.get(pk=module_id)\n package_number = VotingToken.next_package_number(module)\n module_name = module.name\n project_id = module.project.id\n project_name = module.project.name\n\n number_to_generate = number_of_tokens\n package_number_limit = 0\n if number_of_tokens > PACKAGE_SIZE:\n package_number_limit = number_of_tokens - PACKAGE_SIZE\n while number_to_generate > 0:\n if number_to_generate >= BATCH_SIZE:\n generate_voting_tokens_batch(\n module_id,\n BATCH_SIZE,\n package_number,\n number_of_tokens,\n module_name,\n project_id,\n project_name,\n existing_tokens,\n )\n number_to_generate = number_to_generate - BATCH_SIZE\n else:\n generate_voting_tokens_batch(\n module_id,\n number_to_generate,\n package_number,\n number_of_tokens,\n module_name,\n project_id,\n project_name,\n existing_tokens,\n )\n number_to_generate = 0\n if package_number_limit >= number_to_generate:\n package_number += 1\n package_number_limit - PACKAGE_SIZE\n\n\n@background(schedule=1)\ndef generate_voting_tokens_batch(\n module_id,\n batch_size,\n package_number,\n number_of_tokens,\n module_name,\n project_id,\n project_name,\n existing_tokens,\n):\n module = Module.objects.get(pk=module_id)\n VotingToken.objects.bulk_create(\n [get_token_and_hash(module, package_number) for i in range(batch_size)]\n )\n\n\ndef get_token_and_hash(module, package_number):\n token = get_token_12()\n token_hash = VotingToken.hash_token(token, module)\n return VotingToken(\n token=token, token_hash=token_hash, module=module, package_number=package_number\n )\n", "path": "meinberlin/apps/votes/tasks.py"}]}
1,427
166
gh_patches_debug_17265
rasdani/github-patches
git_diff
netbox-community__netbox-2694
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add "White" as a cable color ### Environment * Python version: 3.6 * NetBox version: 2.5.1 ### Proposed Functionality Add color white to the cable colors. Optionally add: * ~~slate~~(Dark Grey works, almost identical color) * rose * ~~violet~~ (Fuschia works, almost identical color) * aqua ### Use Case These fiber strand colors are missing ### Database Changes None ### External Dependencies None </issue> <code> [start of netbox/utilities/constants.py] 1 COLOR_CHOICES = ( 2 ('aa1409', 'Dark red'), 3 ('f44336', 'Red'), 4 ('e91e63', 'Pink'), 5 ('ff66ff', 'Fuschia'), 6 ('9c27b0', 'Purple'), 7 ('673ab7', 'Dark purple'), 8 ('3f51b5', 'Indigo'), 9 ('2196f3', 'Blue'), 10 ('03a9f4', 'Light blue'), 11 ('00bcd4', 'Cyan'), 12 ('009688', 'Teal'), 13 ('2f6a31', 'Dark green'), 14 ('4caf50', 'Green'), 15 ('8bc34a', 'Light green'), 16 ('cddc39', 'Lime'), 17 ('ffeb3b', 'Yellow'), 18 ('ffc107', 'Amber'), 19 ('ff9800', 'Orange'), 20 ('ff5722', 'Dark orange'), 21 ('795548', 'Brown'), 22 ('c0c0c0', 'Light grey'), 23 ('9e9e9e', 'Grey'), 24 ('607d8b', 'Dark grey'), 25 ('111111', 'Black'), 26 ) 27 [end of netbox/utilities/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/utilities/constants.py b/netbox/utilities/constants.py --- a/netbox/utilities/constants.py +++ b/netbox/utilities/constants.py @@ -2,6 +2,7 @@ ('aa1409', 'Dark red'), ('f44336', 'Red'), ('e91e63', 'Pink'), + ('ffe4e1', 'Rose'), ('ff66ff', 'Fuschia'), ('9c27b0', 'Purple'), ('673ab7', 'Dark purple'), @@ -10,6 +11,7 @@ ('03a9f4', 'Light blue'), ('00bcd4', 'Cyan'), ('009688', 'Teal'), + ('00ffff', 'Aqua'), ('2f6a31', 'Dark green'), ('4caf50', 'Green'), ('8bc34a', 'Light green'), @@ -23,4 +25,5 @@ ('9e9e9e', 'Grey'), ('607d8b', 'Dark grey'), ('111111', 'Black'), + ('ffffff', 'White'), )
{"golden_diff": "diff --git a/netbox/utilities/constants.py b/netbox/utilities/constants.py\n--- a/netbox/utilities/constants.py\n+++ b/netbox/utilities/constants.py\n@@ -2,6 +2,7 @@\n ('aa1409', 'Dark red'),\n ('f44336', 'Red'),\n ('e91e63', 'Pink'),\n+ ('ffe4e1', 'Rose'),\n ('ff66ff', 'Fuschia'),\n ('9c27b0', 'Purple'),\n ('673ab7', 'Dark purple'),\n@@ -10,6 +11,7 @@\n ('03a9f4', 'Light blue'),\n ('00bcd4', 'Cyan'),\n ('009688', 'Teal'),\n+ ('00ffff', 'Aqua'),\n ('2f6a31', 'Dark green'),\n ('4caf50', 'Green'),\n ('8bc34a', 'Light green'),\n@@ -23,4 +25,5 @@\n ('9e9e9e', 'Grey'),\n ('607d8b', 'Dark grey'),\n ('111111', 'Black'),\n+ ('ffffff', 'White'),\n )\n", "issue": "Add \"White\" as a cable color\n### Environment\r\n* Python version: 3.6\r\n* NetBox version: 2.5.1\r\n\r\n### Proposed Functionality\r\n\r\nAdd color white to the cable colors.\r\n\r\nOptionally add:\r\n\r\n* ~~slate~~(Dark Grey works, almost identical color)\r\n* rose\r\n* ~~violet~~ (Fuschia works, almost identical color)\r\n* aqua\r\n\r\n### Use Case\r\n\r\nThese fiber strand colors are missing\r\n\r\n### Database Changes\r\n\r\nNone\r\n\r\n### External Dependencies\r\n\r\nNone\n", "before_files": [{"content": "COLOR_CHOICES = (\n ('aa1409', 'Dark red'),\n ('f44336', 'Red'),\n ('e91e63', 'Pink'),\n ('ff66ff', 'Fuschia'),\n ('9c27b0', 'Purple'),\n ('673ab7', 'Dark purple'),\n ('3f51b5', 'Indigo'),\n ('2196f3', 'Blue'),\n ('03a9f4', 'Light blue'),\n ('00bcd4', 'Cyan'),\n ('009688', 'Teal'),\n ('2f6a31', 'Dark green'),\n ('4caf50', 'Green'),\n ('8bc34a', 'Light green'),\n ('cddc39', 'Lime'),\n ('ffeb3b', 'Yellow'),\n ('ffc107', 'Amber'),\n ('ff9800', 'Orange'),\n ('ff5722', 'Dark orange'),\n ('795548', 'Brown'),\n ('c0c0c0', 'Light grey'),\n ('9e9e9e', 'Grey'),\n ('607d8b', 'Dark grey'),\n ('111111', 'Black'),\n)\n", "path": "netbox/utilities/constants.py"}]}
986
283
gh_patches_debug_4763
rasdani/github-patches
git_diff
pytorch__ignite-3199
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Mean Absolute Percentage Error (MAPE) ## 🚀 Feature I'd like to implement the mean absolute percentage error [(MAPE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) in `ignite/metrics`. It is a commonly used metric for regression problems and it would be really convenient to be able to use it directly with ignite evaluators. For that, I would write a custom Metric class in a new file `mean_absolute_percentage_error.py` inheriting from the base `Metric` class in `ignite/metrics/metric.py`. </issue> <code> [start of ignite/contrib/metrics/regression/mean_absolute_relative_error.py] 1 from typing import Tuple 2 3 import torch 4 5 from ignite.contrib.metrics.regression._base import _BaseRegression 6 from ignite.exceptions import NotComputableError 7 from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce 8 9 10 class MeanAbsoluteRelativeError(_BaseRegression): 11 r"""Calculate Mean Absolute Relative Error. 12 13 .. math:: 14 \text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|} 15 16 where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value. 17 18 More details can be found in the reference `Botchkarev 2018`__. 19 20 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. 21 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`. 22 23 __ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf 24 25 Parameters are inherited from ``Metric.__init__``. 26 27 Args: 28 output_transform: a callable that is used to transform the 29 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 30 form expected by the metric. This can be useful if, for example, you have a multi-output model and 31 you want to compute the metric with respect to one of the outputs. 32 By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. 33 device: specifies which device updates are accumulated on. Setting the 34 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is 35 non-blocking. By default, CPU. 36 37 Examples: 38 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. 39 The output of the engine's ``process_function`` needs to be in format of 40 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. 41 42 .. include:: defaults.rst 43 :start-after: :orphan: 44 45 .. testcode:: 46 47 metric = MeanAbsoluteRelativeError() 48 metric.attach(default_evaluator, 'mare') 49 y_true = torch.tensor([1., 2., 3., 4., 5.]) 50 y_pred = y_true * 0.75 51 state = default_evaluator.run([[y_pred, y_true]]) 52 print(state.metrics['mare']) 53 54 .. testoutput:: 55 56 0.25... 57 58 .. versionchanged:: 0.4.5 59 - Works with DDP. 
60 """ 61 _state_dict_all_req_keys = ("_sum_of_absolute_relative_errors", "_num_samples") 62 63 @reinit__is_reduced 64 def reset(self) -> None: 65 self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device) 66 self._num_samples = 0 67 68 def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None: 69 y_pred, y = output[0].detach(), output[1].detach() 70 if (y == 0).any(): 71 raise NotComputableError("The ground truth has 0.") 72 absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred)) 73 self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device) 74 self._num_samples += y.size()[0] 75 76 @sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples") 77 def compute(self) -> float: 78 if self._num_samples == 0: 79 raise NotComputableError( 80 "MeanAbsoluteRelativeError must have at least one sample before it can be computed." 81 ) 82 return self._sum_of_absolute_relative_errors.item() / self._num_samples 83 [end of ignite/contrib/metrics/regression/mean_absolute_relative_error.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py --- a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py +++ b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py @@ -8,7 +8,7 @@ class MeanAbsoluteRelativeError(_BaseRegression): - r"""Calculate Mean Absolute Relative Error. + r"""Calculate Mean Absolute Relative Error (MARE), also known as Mean Absolute Percentage Error (MAPE). .. math:: \text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|}
{"golden_diff": "diff --git a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py\n--- a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py\n+++ b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py\n@@ -8,7 +8,7 @@\n \n \n class MeanAbsoluteRelativeError(_BaseRegression):\n- r\"\"\"Calculate Mean Absolute Relative Error.\n+ r\"\"\"Calculate Mean Absolute Relative Error (MARE), also known as Mean Absolute Percentage Error (MAPE).\n \n .. math::\n \\text{MARE} = \\frac{1}{n}\\sum_{j=1}^n\\frac{\\left|A_j-P_j\\right|}{\\left|A_j\\right|}\n", "issue": "Mean Absolute Percentage Error (MAPE)\n## \ud83d\ude80 Feature\r\n\r\nI'd like to implement the mean absolute percentage error [(MAPE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) in `ignite/metrics`.\r\n\r\nIt is a commonly used metric for regression problems and it would be really convenient to be able to use it directly with ignite evaluators.\r\n\r\nFor that, I would write a custom Metric class in a new file `mean_absolute_percentage_error.py` inheriting from the base `Metric` class in `ignite/metrics/metric.py`.\r\n\n", "before_files": [{"content": "from typing import Tuple\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass MeanAbsoluteRelativeError(_BaseRegression):\n r\"\"\"Calculate Mean Absolute Relative Error.\n\n .. math::\n \\text{MARE} = \\frac{1}{n}\\sum_{j=1}^n\\frac{\\left|A_j-P_j\\right|}{\\left|A_j\\right|}\n\n where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in the reference `Botchkarev 2018`__.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n __ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf\n\n Parameters are inherited from ``Metric.__init__``.\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = MeanAbsoluteRelativeError()\n metric.attach(default_evaluator, 'mare')\n y_true = torch.tensor([1., 2., 3., 4., 5.])\n y_pred = y_true * 0.75\n state = default_evaluator.run([[y_pred, y_true]])\n print(state.metrics['mare'])\n\n .. testoutput::\n\n 0.25...\n\n .. 
versionchanged:: 0.4.5\n - Works with DDP.\n \"\"\"\n _state_dict_all_req_keys = (\"_sum_of_absolute_relative_errors\", \"_num_samples\")\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device)\n self._num_samples = 0\n\n def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n if (y == 0).any():\n raise NotComputableError(\"The ground truth has 0.\")\n absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))\n self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)\n self._num_samples += y.size()[0]\n\n @sync_all_reduce(\"_sum_of_absolute_relative_errors\", \"_num_samples\")\n def compute(self) -> float:\n if self._num_samples == 0:\n raise NotComputableError(\n \"MeanAbsoluteRelativeError must have at least one sample before it can be computed.\"\n )\n return self._sum_of_absolute_relative_errors.item() / self._num_samples\n", "path": "ignite/contrib/metrics/regression/mean_absolute_relative_error.py"}]}
1,717
172
gh_patches_debug_1757
rasdani/github-patches
git_diff
mne-tools__mne-bids-1156
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MNE-BIDS 0.13 release A release of MNE-BIDS has been requested: https://mne.discourse.group/t/mne-bids-0-13-release-date/7291/2 Our last release has been in December 2022, so I feel like cutting a release now is reasonable. I'll migrate issues from the [0.13 milestone](https://github.com/mne-tools/mne-bids/milestone/14) to a new 0.14 milestone. Please comment here if you need some particular thing to be fixed before the release. cc @agramfort @hoechenberger @larsoner </issue> <code> [start of mne_bids/__init__.py] 1 """MNE software for easily interacting with BIDS compatible datasets.""" 2 3 __version__ = "0.13.dev0" 4 from mne_bids import commands 5 from mne_bids.report import make_report 6 from mne_bids.path import ( 7 BIDSPath, 8 get_datatypes, 9 get_entity_vals, 10 print_dir_tree, 11 get_entities_from_fname, 12 search_folder_for_text, 13 get_bids_path_from_fname, 14 find_matching_paths, 15 ) 16 from mne_bids.read import get_head_mri_trans, read_raw_bids 17 from mne_bids.utils import get_anonymization_daysback 18 from mne_bids.write import ( 19 make_dataset_description, 20 write_anat, 21 write_raw_bids, 22 mark_channels, 23 write_meg_calibration, 24 write_meg_crosstalk, 25 get_anat_landmarks, 26 anonymize_dataset, 27 ) 28 from mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks 29 from mne_bids.inspect import inspect_dataset 30 from mne_bids.dig import ( 31 template_to_head, 32 convert_montage_to_ras, 33 convert_montage_to_mri, 34 ) 35 [end of mne_bids/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py --- a/mne_bids/__init__.py +++ b/mne_bids/__init__.py @@ -1,6 +1,6 @@ """MNE software for easily interacting with BIDS compatible datasets.""" -__version__ = "0.13.dev0" +__version__ = "0.13" from mne_bids import commands from mne_bids.report import make_report from mne_bids.path import (
{"golden_diff": "diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py\n--- a/mne_bids/__init__.py\n+++ b/mne_bids/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n \n-__version__ = \"0.13.dev0\"\n+__version__ = \"0.13\"\n from mne_bids import commands\n from mne_bids.report import make_report\n from mne_bids.path import (\n", "issue": "MNE-BIDS 0.13 release\nA release of MNE-BIDS has been requested: https://mne.discourse.group/t/mne-bids-0-13-release-date/7291/2\r\n\r\nOur last release has been in December 2022, so I feel like cutting a release now is reasonable.\r\n\r\nI'll migrate issues from the [0.13 milestone](https://github.com/mne-tools/mne-bids/milestone/14) to a new 0.14 milestone.\r\n\r\nPlease comment here if you need some particular thing to be fixed before the release.\r\n\r\ncc @agramfort @hoechenberger @larsoner \n", "before_files": [{"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = \"0.13.dev0\"\nfrom mne_bids import commands\nfrom mne_bids.report import make_report\nfrom mne_bids.path import (\n BIDSPath,\n get_datatypes,\n get_entity_vals,\n print_dir_tree,\n get_entities_from_fname,\n search_folder_for_text,\n get_bids_path_from_fname,\n find_matching_paths,\n)\nfrom mne_bids.read import get_head_mri_trans, read_raw_bids\nfrom mne_bids.utils import get_anonymization_daysback\nfrom mne_bids.write import (\n make_dataset_description,\n write_anat,\n write_raw_bids,\n mark_channels,\n write_meg_calibration,\n write_meg_crosstalk,\n get_anat_landmarks,\n anonymize_dataset,\n)\nfrom mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks\nfrom mne_bids.inspect import inspect_dataset\nfrom mne_bids.dig import (\n template_to_head,\n convert_montage_to_ras,\n convert_montage_to_mri,\n)\n", "path": "mne_bids/__init__.py"}]}
993
119
gh_patches_debug_36782
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2006
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cfn-lint 0.49.1 does not catch `/` as an invalid character in a Mapping element name *cfn-lint version: cfn-lint 0.49.1* *cfn-lint did not catch `/` as an invalid character in a Mapping element name* cfn-lint passed successfully with this mapping included in the template: ```yaml Mappings: NameServers: 10.90.0.0/16: NameServer1: 10.90.0.10 NameServer2: 10.90.4.10 10.91.0.0/16: NameServer1: 10.91.0.10 NameServer2: 10.91.4.10 ``` However AWS rejected it: > Template format error: Mappings element name '10.93.0.0/16' must be non-empty and can contain only alphanumerics, '-' or '.' ![Screen Shot 2021-05-12 at 11 34 41](https://user-images.githubusercontent.com/7205587/117905164-0e917c00-b316-11eb-8132-2da240ff892a.png) </issue> <code> [start of src/cfnlint/rules/mappings/KeyName.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import re 6 import six 7 from cfnlint.rules import CloudFormationLintRule 8 from cfnlint.rules import RuleMatch 9 from cfnlint.helpers import REGEX_ALPHANUMERIC 10 11 12 class KeyName(CloudFormationLintRule): 13 """Check if Mapping Keys are type string""" 14 id = 'E7003' 15 shortdesc = 'Mapping keys are strings and alphanumeric' 16 description = 'Check if Mappings keys are properly typed as strings and alphanumeric' 17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html' 18 tags = ['mappings'] 19 20 def check_key(self, key, path, check_alphanumeric=True): 21 """ Check the key name for string and alphanumeric""" 22 matches = [] 23 if not isinstance(key, six.string_types): 24 message = 'Mapping key ({0}) has to be a string.' 25 matches.append(RuleMatch(path[:], message.format(key))) 26 elif not re.match(REGEX_ALPHANUMERIC, key) and check_alphanumeric: 27 message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric.' 28 matches.append(RuleMatch(path[:], message.format(key))) 29 30 return matches 31 32 def match(self, cfn): 33 matches = [] 34 35 mappings = cfn.template.get('Mappings', {}) 36 for mapping_name, mapping_value in mappings.items(): 37 if isinstance(mapping_value, dict): 38 for key_name, key_value in mapping_value.items(): 39 matches.extend(self.check_key( 40 key_name, ['Mappings', mapping_name, key_name], False)) 41 if isinstance(key_value, dict): 42 for sub_key_name, _ in key_value.items(): 43 matches.extend( 44 self.check_key( 45 sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name])) 46 47 return matches 48 [end of src/cfnlint/rules/mappings/KeyName.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py --- a/src/cfnlint/rules/mappings/KeyName.py +++ b/src/cfnlint/rules/mappings/KeyName.py @@ -17,14 +17,26 @@ source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html' tags = ['mappings'] - def check_key(self, key, path, check_alphanumeric=True): + def check_attribute(self, key, path): + """ Check the key name for string and alphanumeric""" + matches = [] + if not isinstance(key, six.string_types): + message = 'Mapping attribute ({0}) has to be a string.' + matches.append(RuleMatch(path[:], message.format(key))) + elif not re.match(REGEX_ALPHANUMERIC, key): + message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.' + matches.append(RuleMatch(path[:], message.format(key))) + + return matches + + def check_key(self, key, path): """ Check the key name for string and alphanumeric""" matches = [] if not isinstance(key, six.string_types): message = 'Mapping key ({0}) has to be a string.' matches.append(RuleMatch(path[:], message.format(key))) - elif not re.match(REGEX_ALPHANUMERIC, key) and check_alphanumeric: - message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric.' + elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key): + message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \'-\' or \'.\'' matches.append(RuleMatch(path[:], message.format(key))) return matches @@ -37,11 +49,11 @@ if isinstance(mapping_value, dict): for key_name, key_value in mapping_value.items(): matches.extend(self.check_key( - key_name, ['Mappings', mapping_name, key_name], False)) + key_name, ['Mappings', mapping_name, key_name])) if isinstance(key_value, dict): for sub_key_name, _ in key_value.items(): matches.extend( - self.check_key( + self.check_attribute( sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name])) return matches
{"golden_diff": "diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py\n--- a/src/cfnlint/rules/mappings/KeyName.py\n+++ b/src/cfnlint/rules/mappings/KeyName.py\n@@ -17,14 +17,26 @@\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n \n- def check_key(self, key, path, check_alphanumeric=True):\n+ def check_attribute(self, key, path):\n+ \"\"\" Check the key name for string and alphanumeric\"\"\"\n+ matches = []\n+ if not isinstance(key, six.string_types):\n+ message = 'Mapping attribute ({0}) has to be a string.'\n+ matches.append(RuleMatch(path[:], message.format(key)))\n+ elif not re.match(REGEX_ALPHANUMERIC, key):\n+ message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'\n+ matches.append(RuleMatch(path[:], message.format(key)))\n+\n+ return matches\n+\n+ def check_key(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n- elif not re.match(REGEX_ALPHANUMERIC, key) and check_alphanumeric:\n- message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric.'\n+ elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):\n+ message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n \n return matches\n@@ -37,11 +49,11 @@\n if isinstance(mapping_value, dict):\n for key_name, key_value in mapping_value.items():\n matches.extend(self.check_key(\n- key_name, ['Mappings', mapping_name, key_name], False))\n+ key_name, ['Mappings', mapping_name, key_name]))\n if isinstance(key_value, dict):\n for sub_key_name, _ in key_value.items():\n matches.extend(\n- self.check_key(\n+ self.check_attribute(\n sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))\n \n return matches\n", "issue": "cfn-lint 0.49.1 does not catch `/` as an invalid character in a Mapping element name\n*cfn-lint version: cfn-lint 0.49.1*\r\n\r\n*cfn-lint did not catch `/` as an invalid character in a Mapping element name*\r\n\r\ncfn-lint passed successfully with this mapping included in the template:\r\n```yaml\r\nMappings:\r\n NameServers:\r\n 10.90.0.0/16:\r\n NameServer1: 10.90.0.10\r\n NameServer2: 10.90.4.10\r\n 10.91.0.0/16:\r\n NameServer1: 10.91.0.10\r\n NameServer2: 10.91.4.10\r\n```\r\n\r\nHowever AWS rejected it:\r\n> Template format error: Mappings element name '10.93.0.0/16' must be non-empty and can contain only alphanumerics, '-' or '.'\r\n\r\n![Screen Shot 2021-05-12 at 11 34 41](https://user-images.githubusercontent.com/7205587/117905164-0e917c00-b316-11eb-8132-2da240ff892a.png)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_ALPHANUMERIC\n\n\nclass KeyName(CloudFormationLintRule):\n \"\"\"Check if Mapping Keys are type string\"\"\"\n id = 'E7003'\n shortdesc = 'Mapping keys are strings and alphanumeric'\n description = 'Check if Mappings keys are properly typed as strings and alphanumeric'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def check_key(self, key, path, check_alphanumeric=True):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match(REGEX_ALPHANUMERIC, key) and check_alphanumeric:\n message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric.'\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n for mapping_name, mapping_value in mappings.items():\n if isinstance(mapping_value, dict):\n for key_name, key_value in mapping_value.items():\n matches.extend(self.check_key(\n key_name, ['Mappings', mapping_name, key_name], False))\n if isinstance(key_value, dict):\n for sub_key_name, _ in key_value.items():\n matches.extend(\n self.check_key(\n sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/KeyName.py"}]}
1,364
548
gh_patches_debug_8668
rasdani/github-patches
git_diff
wright-group__WrightTools-1132
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> shift supported Python 3 versions Since users are increasingly relying on 3.10 and 3.11, I propose we move testing from 3.7-9 to 3.8-11. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python3 2 3 import os 4 from setuptools import setup, find_packages 5 6 7 here = os.path.abspath(os.path.dirname(__file__)) 8 9 10 def read(fname): 11 with open(os.path.join(here, fname)) as f: 12 return f.read() 13 14 15 extra_files = { 16 "WrightTools": [ 17 "datasets", 18 "datasets/*", 19 "datasets/*/*", 20 "datasets/*/*/*", 21 "datasets/*/*/*/*", 22 "CITATION", 23 "VERSION", 24 "WT5_VERSION", 25 ] 26 } 27 28 with open(os.path.join(here, "WrightTools", "VERSION")) as version_file: 29 version = version_file.read().strip() 30 31 docs_require = ["sphinx", "sphinx-gallery==0.8.2", "sphinx-rtd-theme"] 32 33 setup( 34 name="WrightTools", 35 packages=find_packages(exclude=("tests", "tests.*")), 36 package_data=extra_files, 37 python_requires=">=3.7", 38 install_requires=[ 39 "h5py", 40 "imageio", 41 "matplotlib>=3.4.0", 42 "numexpr", 43 "numpy>=1.15.0", 44 "pint", 45 "python-dateutil", 46 "scipy", 47 "tidy_headers>=1.0.0", 48 ], 49 extras_require={ 50 "docs": docs_require, 51 "dev": [ 52 "black", 53 "pre-commit", 54 "pydocstyle", 55 "pytest", 56 "pytest-cov", 57 "databroker>=1.2", 58 "msgpack", 59 ] 60 + docs_require, 61 }, 62 version=version, 63 description="Tools for loading, processing, and plotting multidimensional spectroscopy data.", 64 long_description=read("README.rst"), 65 author="WrightTools Developers", 66 license="MIT", 67 url="http://wright.tools", 68 keywords="spectroscopy science multidimensional visualization", 69 entry_points={"console_scripts": ["wt-tree=WrightTools.__main__:wt_tree"]}, 70 classifiers=[ 71 "Development Status :: 5 - Production/Stable", 72 "Intended Audience :: Science/Research", 73 "License :: OSI Approved :: MIT License", 74 "Framework :: Matplotlib", 75 "Natural Language :: English", 76 "Programming Language :: Python :: 3", 77 "Programming Language :: Python :: 3.7", 78 "Programming Language :: Python :: 3.8", 79 "Programming Language :: Python :: 3.9", 80 "Topic :: Scientific/Engineering", 81 ], 82 ) 83 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -74,9 +74,10 @@ "Framework :: Matplotlib", "Natural Language :: English", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering", ], )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,9 +74,10 @@\n \"Framework :: Matplotlib\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n+ \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering\",\n ],\n )\n", "issue": "shift supported Python 3 versions\nSince users are increasingly relying on 3.10 and 3.11, I propose we move testing from 3.7-9 to 3.8-11.\r\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n with open(os.path.join(here, fname)) as f:\n return f.read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\ndocs_require = [\"sphinx\", \"sphinx-gallery==0.8.2\", \"sphinx-rtd-theme\"]\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.7\",\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=3.4.0\",\n \"numexpr\",\n \"numpy>=1.15.0\",\n \"pint\",\n \"python-dateutil\",\n \"scipy\",\n \"tidy_headers>=1.0.0\",\n ],\n extras_require={\n \"docs\": docs_require,\n \"dev\": [\n \"black\",\n \"pre-commit\",\n \"pydocstyle\",\n \"pytest\",\n \"pytest-cov\",\n \"databroker>=1.2\",\n \"msgpack\",\n ]\n + docs_require,\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n entry_points={\"console_scripts\": [\"wt-tree=WrightTools.__main__:wt_tree\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Framework :: Matplotlib\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n", "path": "setup.py"}]}
1,276
134
gh_patches_debug_35416
rasdani/github-patches
git_diff
zigpy__zha-device-handlers-1664
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] New yooksmart D10110 inverted with quirk **Describe the bug** I purchased a new yooksmart D10110 cover and paired with home assistant. The controls seemed inverted and I had to move the bar twice in order to get it to move. I read reports in the past with the suggestion to unpair and pair again, tried multiple times with no luck. So I disabled the quirk (apologies for the brute force: moved the file to a different directory and reloaded) and it works now. For completeness: Before: - buttons up and down wouldn't work - available button would be inverted (e.g.: cover was all the way down and the down button was enabled) - in order to control the cover I'd move the progress bar all the way to 0 or to 100 then the opposite in order to work After: - buttons up and down work - enabled button matches the direction of the cover: if open, it shows down button enabled **To Reproduce** Behavior is consistent across multiple pair/unpair cycles and full home assistant instance restarts **Additional context** Something that is possible, since the cover is new, is that they corrected the behavior in their firmware and the quirk isn't needed anymore. This device has: Firmware: 0x10013001 I can provide any debugging necessary. I'm using homeassistant official virtual machine image and keeping it up to date. Editted: formatting </issue> <code> [start of zhaquirks/yooksmart/D10110blinds.py] 1 """Device handler for Yooksmart D10110 roller blinds.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomCluster, CustomDevice 4 from zigpy.zcl.clusters.closures import WindowCovering 5 from zigpy.zcl.clusters.general import ( 6 Basic, 7 Groups, 8 Identify, 9 Ota, 10 PollControl, 11 PowerConfiguration, 12 Scenes, 13 ) 14 15 from zhaquirks.const import ( 16 DEVICE_TYPE, 17 ENDPOINTS, 18 INPUT_CLUSTERS, 19 MODELS_INFO, 20 OUTPUT_CLUSTERS, 21 PROFILE_ID, 22 ) 23 24 25 class InvertedWindowCoveringCluster(CustomCluster, WindowCovering): 26 """WindowCovering cluster implementation. 27 28 This implementation inverts the reported covering percent for non standard 29 devices that don't follow the reporting spec. 
30 """ 31 32 cluster_id = WindowCovering.cluster_id 33 CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008 34 35 def _update_attribute(self, attrid, value): 36 if attrid == self.CURRENT_POSITION_LIFT_PERCENTAGE: 37 value = 100 - value 38 super()._update_attribute(attrid, value) 39 40 41 class D10110Blinds(CustomDevice): 42 """Custom device representing Yooksmart D10110 roller blinds.""" 43 44 signature = { 45 # <SimpleDescriptor endpoint=1 profile=260 device_type=514 46 # device_version=1 47 # input_clusters=[0, 1, 3, 4, 5, 32, 258] 48 # output_clusters=[3, 25]> 49 MODELS_INFO: [ 50 ("yooksmart", "D10110"), 51 ], 52 ENDPOINTS: { 53 1: { 54 PROFILE_ID: zha.PROFILE_ID, 55 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, 56 INPUT_CLUSTERS: [ 57 Basic.cluster_id, 58 PowerConfiguration.cluster_id, 59 Identify.cluster_id, 60 Groups.cluster_id, 61 Scenes.cluster_id, 62 PollControl.cluster_id, 63 WindowCovering.cluster_id, 64 ], 65 OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], 66 } 67 }, 68 } 69 70 replacement = { 71 ENDPOINTS: { 72 1: { 73 PROFILE_ID: zha.PROFILE_ID, 74 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, 75 INPUT_CLUSTERS: [ 76 Basic.cluster_id, 77 PowerConfiguration.cluster_id, 78 Identify.cluster_id, 79 Groups.cluster_id, 80 Scenes.cluster_id, 81 PollControl.cluster_id, 82 InvertedWindowCoveringCluster, 83 ], 84 OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], 85 } 86 } 87 } 88 [end of zhaquirks/yooksmart/D10110blinds.py] [start of zhaquirks/yooksmart/__init__.py] 1 """Yooksmart module for custom device handlers.""" 2 [end of zhaquirks/yooksmart/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zhaquirks/yooksmart/D10110blinds.py b/zhaquirks/yooksmart/D10110blinds.py deleted file mode 100644 --- a/zhaquirks/yooksmart/D10110blinds.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Device handler for Yooksmart D10110 roller blinds.""" -from zigpy.profiles import zha -from zigpy.quirks import CustomCluster, CustomDevice -from zigpy.zcl.clusters.closures import WindowCovering -from zigpy.zcl.clusters.general import ( - Basic, - Groups, - Identify, - Ota, - PollControl, - PowerConfiguration, - Scenes, -) - -from zhaquirks.const import ( - DEVICE_TYPE, - ENDPOINTS, - INPUT_CLUSTERS, - MODELS_INFO, - OUTPUT_CLUSTERS, - PROFILE_ID, -) - - -class InvertedWindowCoveringCluster(CustomCluster, WindowCovering): - """WindowCovering cluster implementation. - - This implementation inverts the reported covering percent for non standard - devices that don't follow the reporting spec. - """ - - cluster_id = WindowCovering.cluster_id - CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008 - - def _update_attribute(self, attrid, value): - if attrid == self.CURRENT_POSITION_LIFT_PERCENTAGE: - value = 100 - value - super()._update_attribute(attrid, value) - - -class D10110Blinds(CustomDevice): - """Custom device representing Yooksmart D10110 roller blinds.""" - - signature = { - # <SimpleDescriptor endpoint=1 profile=260 device_type=514 - # device_version=1 - # input_clusters=[0, 1, 3, 4, 5, 32, 258] - # output_clusters=[3, 25]> - MODELS_INFO: [ - ("yooksmart", "D10110"), - ], - ENDPOINTS: { - 1: { - PROFILE_ID: zha.PROFILE_ID, - DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, - INPUT_CLUSTERS: [ - Basic.cluster_id, - PowerConfiguration.cluster_id, - Identify.cluster_id, - Groups.cluster_id, - Scenes.cluster_id, - PollControl.cluster_id, - WindowCovering.cluster_id, - ], - OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], - } - }, - } - - replacement = { - ENDPOINTS: { - 1: { - PROFILE_ID: zha.PROFILE_ID, - DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, - INPUT_CLUSTERS: [ - Basic.cluster_id, - PowerConfiguration.cluster_id, - Identify.cluster_id, - Groups.cluster_id, - Scenes.cluster_id, - PollControl.cluster_id, - InvertedWindowCoveringCluster, - ], - OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id], - } - } - } diff --git a/zhaquirks/yooksmart/__init__.py b/zhaquirks/yooksmart/__init__.py deleted file mode 100644 --- a/zhaquirks/yooksmart/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Yooksmart module for custom device handlers."""
{"golden_diff": "diff --git a/zhaquirks/yooksmart/D10110blinds.py b/zhaquirks/yooksmart/D10110blinds.py\ndeleted file mode 100644\n--- a/zhaquirks/yooksmart/D10110blinds.py\n+++ /dev/null\n@@ -1,87 +0,0 @@\n-\"\"\"Device handler for Yooksmart D10110 roller blinds.\"\"\"\n-from zigpy.profiles import zha\n-from zigpy.quirks import CustomCluster, CustomDevice\n-from zigpy.zcl.clusters.closures import WindowCovering\n-from zigpy.zcl.clusters.general import (\n- Basic,\n- Groups,\n- Identify,\n- Ota,\n- PollControl,\n- PowerConfiguration,\n- Scenes,\n-)\n-\n-from zhaquirks.const import (\n- DEVICE_TYPE,\n- ENDPOINTS,\n- INPUT_CLUSTERS,\n- MODELS_INFO,\n- OUTPUT_CLUSTERS,\n- PROFILE_ID,\n-)\n-\n-\n-class InvertedWindowCoveringCluster(CustomCluster, WindowCovering):\n- \"\"\"WindowCovering cluster implementation.\n-\n- This implementation inverts the reported covering percent for non standard\n- devices that don't follow the reporting spec.\n- \"\"\"\n-\n- cluster_id = WindowCovering.cluster_id\n- CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008\n-\n- def _update_attribute(self, attrid, value):\n- if attrid == self.CURRENT_POSITION_LIFT_PERCENTAGE:\n- value = 100 - value\n- super()._update_attribute(attrid, value)\n-\n-\n-class D10110Blinds(CustomDevice):\n- \"\"\"Custom device representing Yooksmart D10110 roller blinds.\"\"\"\n-\n- signature = {\n- # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n- # device_version=1\n- # input_clusters=[0, 1, 3, 4, 5, 32, 258]\n- # output_clusters=[3, 25]>\n- MODELS_INFO: [\n- (\"yooksmart\", \"D10110\"),\n- ],\n- ENDPOINTS: {\n- 1: {\n- PROFILE_ID: zha.PROFILE_ID,\n- DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n- INPUT_CLUSTERS: [\n- Basic.cluster_id,\n- PowerConfiguration.cluster_id,\n- Identify.cluster_id,\n- Groups.cluster_id,\n- Scenes.cluster_id,\n- PollControl.cluster_id,\n- WindowCovering.cluster_id,\n- ],\n- OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n- }\n- },\n- }\n-\n- replacement = {\n- ENDPOINTS: {\n- 1: {\n- PROFILE_ID: zha.PROFILE_ID,\n- DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n- INPUT_CLUSTERS: [\n- Basic.cluster_id,\n- PowerConfiguration.cluster_id,\n- Identify.cluster_id,\n- Groups.cluster_id,\n- Scenes.cluster_id,\n- PollControl.cluster_id,\n- InvertedWindowCoveringCluster,\n- ],\n- OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n- }\n- }\n- }\ndiff --git a/zhaquirks/yooksmart/__init__.py b/zhaquirks/yooksmart/__init__.py\ndeleted file mode 100644\n--- a/zhaquirks/yooksmart/__init__.py\n+++ /dev/null\n@@ -1 +0,0 @@\n-\"\"\"Yooksmart module for custom device handlers.\"\"\"\n", "issue": "[BUG] New yooksmart D10110 inverted with quirk\n**Describe the bug**\r\nI purchased a new yooksmart D10110 cover and paired with home assistant. The controls\r\nseemed inverted and I had to move the bar twice in order to get it to move. I read reports\r\nin the past with the suggestion to unpair and pair again, tried multiple times with no luck.\r\nSo I disabled the quirk (apologies for the brute force: moved the file to a different directory\r\nand reloaded) and it works now. 
For completeness:\r\nBefore:\r\n- buttons up and down wouldn't work\r\n- available button would be inverted (e.g.: cover was all the way down and the down button was enabled)\r\n- in order to control the cover I'd move the progress bar all the way to 0 or to 100 then the opposite in order to work\r\nAfter:\r\n- buttons up and down work\r\n- enabled button matches the direction of the cover: if open, it shows down button enabled\r\n\r\n**To Reproduce**\r\nBehavior is consistent across multiple pair/unpair cycles and full home assistant instance restarts\r\n\r\n**Additional context**\r\nSomething that is possible, since the cover is new, is that they corrected the behavior in their firmware\r\nand the quirk isn't needed anymore.\r\nThis device has: Firmware: 0x10013001\r\n\r\nI can provide any debugging necessary. I'm using homeassistant official virtual machine image and keeping\r\nit up to date.\r\n\r\nEditted: formatting\n", "before_files": [{"content": "\"\"\"Device handler for Yooksmart D10110 roller blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass InvertedWindowCoveringCluster(CustomCluster, WindowCovering):\n \"\"\"WindowCovering cluster implementation.\n\n This implementation inverts the reported covering percent for non standard\n devices that don't follow the reporting spec.\n \"\"\"\n\n cluster_id = WindowCovering.cluster_id\n CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008\n\n def _update_attribute(self, attrid, value):\n if attrid == self.CURRENT_POSITION_LIFT_PERCENTAGE:\n value = 100 - value\n super()._update_attribute(attrid, value)\n\n\nclass D10110Blinds(CustomDevice):\n \"\"\"Custom device representing Yooksmart D10110 roller blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258]\n # output_clusters=[3, 25]>\n MODELS_INFO: [\n (\"yooksmart\", \"D10110\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n InvertedWindowCoveringCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/yooksmart/D10110blinds.py"}, {"content": "\"\"\"Yooksmart module for custom device handlers.\"\"\"\n", "path": "zhaquirks/yooksmart/__init__.py"}]}
1,682
826
gh_patches_debug_3138
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1231
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PORT] [Authentication] updates to support Arlington > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/3734 # Changed projects * Microsoft.Bot.Connector * Microsoft.Bot.Connector.Tests [R9] </issue> <code> [start of libraries/botframework-connector/botframework/connector/auth/government_constants.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 from abc import ABC 4 5 6 class GovernmentConstants(ABC): 7 8 """ 9 Government Channel Service property value 10 """ 11 12 CHANNEL_SERVICE = "https://botframework.azure.us" 13 14 """ 15 TO CHANNEL FROM BOT: Login URL 16 """ 17 TO_CHANNEL_FROM_BOT_LOGIN_URL = ( 18 "https://login.microsoftonline.us/" 19 "cab8a31a-1906-4287-a0d8-4eef66b95f6e/" 20 "oauth2/v2.0/token" 21 ) 22 23 """ 24 TO CHANNEL FROM BOT: OAuth scope to request 25 """ 26 TO_CHANNEL_FROM_BOT_OAUTH_SCOPE = "https://api.botframework.us/.default" 27 28 """ 29 TO BOT FROM CHANNEL: Token issuer 30 """ 31 TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = "https://api.botframework.us" 32 33 """ 34 TO BOT FROM CHANNEL: OpenID metadata document for tokens coming from MSA 35 """ 36 TO_BOT_FROM_CHANNEL_OPEN_ID_METADATA_URL = ( 37 "https://login.botframework.azure.us/v1/.well-known/openidconfiguration" 38 ) 39 40 """ 41 TO BOT FROM GOV EMULATOR: OpenID metadata document for tokens coming from MSA 42 """ 43 TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL = ( 44 "https://login.microsoftonline.us/" 45 "cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0/" 46 ".well-known/openid-configuration" 47 ) 48 [end of libraries/botframework-connector/botframework/connector/auth/government_constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botframework-connector/botframework/connector/auth/government_constants.py b/libraries/botframework-connector/botframework/connector/auth/government_constants.py --- a/libraries/botframework-connector/botframework/connector/auth/government_constants.py +++ b/libraries/botframework-connector/botframework/connector/auth/government_constants.py @@ -15,9 +15,7 @@ TO CHANNEL FROM BOT: Login URL """ TO_CHANNEL_FROM_BOT_LOGIN_URL = ( - "https://login.microsoftonline.us/" - "cab8a31a-1906-4287-a0d8-4eef66b95f6e/" - "oauth2/v2.0/token" + "https://login.microsoftonline.us/MicrosoftServices.onmicrosoft.us" ) """
{"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/government_constants.py b/libraries/botframework-connector/botframework/connector/auth/government_constants.py\n--- a/libraries/botframework-connector/botframework/connector/auth/government_constants.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/government_constants.py\n@@ -15,9 +15,7 @@\n TO CHANNEL FROM BOT: Login URL\n \"\"\"\n TO_CHANNEL_FROM_BOT_LOGIN_URL = (\n- \"https://login.microsoftonline.us/\"\n- \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/\"\n- \"oauth2/v2.0/token\"\n+ \"https://login.microsoftonline.us/MicrosoftServices.onmicrosoft.us\"\n )\n \n \"\"\"\n", "issue": "[PORT] [Authentication] updates to support Arlington\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3734\n\n\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Connector\r\n* Microsoft.Bot.Connector.Tests\r\n\r\n[R9]\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom abc import ABC\n\n\nclass GovernmentConstants(ABC):\n\n \"\"\"\n Government Channel Service property value\n \"\"\"\n\n CHANNEL_SERVICE = \"https://botframework.azure.us\"\n\n \"\"\"\n TO CHANNEL FROM BOT: Login URL\n \"\"\"\n TO_CHANNEL_FROM_BOT_LOGIN_URL = (\n \"https://login.microsoftonline.us/\"\n \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/\"\n \"oauth2/v2.0/token\"\n )\n\n \"\"\"\n TO CHANNEL FROM BOT: OAuth scope to request\n \"\"\"\n TO_CHANNEL_FROM_BOT_OAUTH_SCOPE = \"https://api.botframework.us/.default\"\n\n \"\"\"\n TO BOT FROM CHANNEL: Token issuer\n \"\"\"\n TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = \"https://api.botframework.us\"\n\n \"\"\"\n TO BOT FROM CHANNEL: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_CHANNEL_OPEN_ID_METADATA_URL = (\n \"https://login.botframework.azure.us/v1/.well-known/openidconfiguration\"\n )\n\n \"\"\"\n TO BOT FROM GOV EMULATOR: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL = (\n \"https://login.microsoftonline.us/\"\n \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0/\"\n \".well-known/openid-configuration\"\n )\n", "path": "libraries/botframework-connector/botframework/connector/auth/government_constants.py"}]}
1,067
193
gh_patches_debug_272
rasdani/github-patches
git_diff
cupy__cupy-1028
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cupy.copyto behaves differently from numpy.copyto when src is a python scalar Code: ```python import numpy import cupy def copyto_check(xp): x = xp.zeros(3, dtype=numpy.float32) # replace first and third items with 1.0 xp.copyto(x, 1.0, where=xp.asarray([True, False, True])) print(x) print('numpy', numpy.__version__) copyto_check(numpy) print('cupy', cupy.__version__) copyto_check(cupy) ``` Output: ``` numpy 1.14.0 [1. 0. 1.] cupy 2.2.0 [1. 1. 1.] ``` </issue> <code> [start of cupy/manipulation/basic.py] 1 import numpy 2 import six 3 4 from cupy import core 5 6 7 def copyto(dst, src, casting='same_kind', where=None): 8 """Copies values from one array to another with broadcasting. 9 10 This function can be called for arrays on different devices. In this case, 11 casting, ``where``, and broadcasting is not supported, and an exception is 12 raised if these are used. 13 14 Args: 15 dst (cupy.ndarray): Target array. 16 src (cupy.ndarray): Source array. 17 casting (str): Casting rule. See :func:`numpy.can_cast` for detail. 18 where (cupy.ndarray of bool): If specified, this array acts as a mask, 19 and an element is copied only if the corresponding element of 20 ``where`` is True. 21 22 .. seealso:: :func:`numpy.copyto` 23 24 """ 25 26 src_type = type(src) 27 src_is_python_scalar = (src_type in six.integer_types or 28 src_type in (bool, float, complex)) 29 if src_is_python_scalar: 30 src_dtype = numpy.dtype(type(src)) 31 can_cast = numpy.can_cast(src, dst.dtype, casting) 32 else: 33 src_dtype = src.dtype 34 can_cast = numpy.can_cast(src_dtype, dst.dtype, casting) 35 36 if not can_cast: 37 raise TypeError('Cannot cast %s to %s in %s casting mode' % 38 (src_dtype, dst.dtype, casting)) 39 if dst.size == 0: 40 return 41 42 if src_is_python_scalar: 43 dst.fill(src) 44 return 45 46 if where is None: 47 if _can_memcpy(dst, src): 48 dst.data.copy_from(src.data, src.nbytes) 49 else: 50 device = dst.device 51 with device: 52 if src.device != device: 53 src = src.copy() 54 core.elementwise_copy(src, dst) 55 else: 56 core.elementwise_copy_where(src, where, dst) 57 58 59 def _can_memcpy(dst, src): 60 c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous 61 f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous 62 return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \ 63 dst.size == src.size 64 [end of cupy/manipulation/basic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/manipulation/basic.py b/cupy/manipulation/basic.py --- a/cupy/manipulation/basic.py +++ b/cupy/manipulation/basic.py @@ -39,7 +39,7 @@ if dst.size == 0: return - if src_is_python_scalar: + if src_is_python_scalar and where is None: dst.fill(src) return
{"golden_diff": "diff --git a/cupy/manipulation/basic.py b/cupy/manipulation/basic.py\n--- a/cupy/manipulation/basic.py\n+++ b/cupy/manipulation/basic.py\n@@ -39,7 +39,7 @@\n if dst.size == 0:\n return\n \n- if src_is_python_scalar:\n+ if src_is_python_scalar and where is None:\n dst.fill(src)\n return\n", "issue": "cupy.copyto behaves differently from numpy.copyto when src is a python scalar\nCode:\r\n```python\r\nimport numpy\r\nimport cupy\r\n\r\ndef copyto_check(xp):\r\n x = xp.zeros(3, dtype=numpy.float32)\r\n # replace first and third items with 1.0\r\n xp.copyto(x, 1.0, where=xp.asarray([True, False, True]))\r\n print(x)\r\n\r\nprint('numpy', numpy.__version__)\r\ncopyto_check(numpy)\r\nprint('cupy', cupy.__version__)\r\ncopyto_check(cupy)\r\n```\r\nOutput:\r\n```\r\nnumpy 1.14.0\r\n[1. 0. 1.]\r\ncupy 2.2.0\r\n[1. 1. 1.]\r\n```\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom cupy import core\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = (src_type in six.integer_types or\n src_type in (bool, float, complex))\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if where is None:\n if _can_memcpy(dst, src):\n dst.data.copy_from(src.data, src.nbytes)\n else:\n device = dst.device\n with device:\n if src.device != device:\n src = src.copy()\n core.elementwise_copy(src, dst)\n else:\n core.elementwise_copy_where(src, where, dst)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/manipulation/basic.py"}]}
1,307
92
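The cupy record above turns on how a masked copy should treat a Python-scalar source, and the accepted fix simply restricts the `fill()` shortcut to the unmasked case. The sketch below is a NumPy-only illustration of the intended semantics; the helper name `copyto_scalar` is made up for this example and is not part of cupy's or numpy's API.

```python
import numpy as np

dst = np.zeros(3, dtype=np.float32)
mask = np.array([True, False, True])

# numpy.copyto honours the mask even when src is a plain Python scalar.
np.copyto(dst, 1.0, where=mask)
print(dst)  # [1. 0. 1.]  (the unmasked middle element stays 0)


def copyto_scalar(dst, scalar, where=None):
    # Guarded shortcut mirroring the fix: fill everything only when no mask is given.
    if where is None:
        dst.fill(scalar)
    else:
        dst[where] = scalar
```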
gh_patches_debug_28700
rasdani/github-patches
git_diff
meltano__meltano-6552
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature]: Collect telemetry data about how `send_anonymous_usage_stats` was configured The project context (and its schema) should be updated to include the key `send_anonymous_usage_stats_source` with the value `ProjectSettingService.get_with_metadata('send_anonymous_usage_stats')[1]['source'].value`, which can be one of the following strings: - `auto` - `config_override` - `db` - `default` - `dotenv` - `env` - `inherited` - `meltano_env` - `meltano_yml` CC @pnadolny13 @aaronsteers </issue> <code> [start of src/meltano/core/tracking/contexts/project.py] 1 """Project context for the Snowplow tracker.""" 2 3 from __future__ import annotations 4 5 import uuid 6 from enum import Enum, auto 7 8 from cached_property import cached_property 9 from snowplow_tracker import SelfDescribingJson 10 from structlog.stdlib import get_logger 11 12 from meltano.core.project import Project 13 from meltano.core.project_settings_service import ProjectSettingsService 14 from meltano.core.tracking.schemas import ProjectContextSchema 15 from meltano.core.utils import hash_sha256 16 17 logger = get_logger(__name__) 18 19 20 class ProjectUUIDSource(Enum): 21 """The source of the `project_uuid` used for telemetry.""" 22 23 # The UUID was explicitly provided in the config as the `project_id`. 24 explicit = auto() 25 26 # The UUID was derived by hashing the `project_id` in the config. 27 derived = auto() 28 29 # The UUID was randomly generated (UUID v4) since no `project_id` was configured. 30 random = auto() 31 32 33 class ProjectContext(SelfDescribingJson): 34 """Tracking context for the Meltano project.""" 35 36 def __init__(self, project: Project, client_id: uuid.UUID): 37 """Initialize a meltano tracking "project" context. 38 39 Args: 40 project: The Meltano project. 41 client_id: The client ID from `analytics.json`. 42 """ 43 self.project = project 44 self.settings_service = ProjectSettingsService(project) 45 self.send_anonymous_usage_stats = self.settings_service.get( 46 "send_anonymous_usage_stats", True 47 ) 48 49 super().__init__( 50 ProjectContextSchema.url, 51 { 52 "context_uuid": str(uuid.uuid4()), 53 "project_uuid": str(self.project_uuid), 54 "project_uuid_source": self.project_uuid_source.name, 55 "client_uuid": str(client_id), 56 "environment_name_hash": ( 57 hash_sha256(self.project.active_environment.name) 58 if self.project.active_environment 59 else None 60 ), 61 }, 62 ) 63 64 @property 65 def project_uuid_source(self) -> ProjectUUIDSource: 66 """Obtain the source of the `project_uuid` used for telemetry. 67 68 Returns: 69 ProjectUUIDSource: The source of the `project_uuid` used for telemetry. 70 """ 71 # Ensure the `project_uuid` has been generated 72 self.project_uuid # noqa: WPS428 73 return self._project_uuid_source 74 75 @cached_property 76 def project_uuid(self) -> uuid.UUID: 77 """Obtain the `project_id` from the project config file. 78 79 If it is not found (e.g. first time run), generate a valid v4 UUID, and and store it in the 80 project config file. 81 82 Returns: 83 The project UUID. 
84 """ 85 project_id_str = self.settings_service.get("project_id") 86 87 if project_id_str: 88 try: 89 # Project ID might already be a UUID 90 project_id = uuid.UUID(project_id_str) 91 except ValueError: 92 # If the project ID is not a UUID, then we hash it, and use the hash to make a UUID 93 project_id = uuid.UUID(hash_sha256(project_id_str)[::2]) 94 self._project_uuid_source = ProjectUUIDSource.derived 95 else: 96 self._project_uuid_source = ProjectUUIDSource.explicit 97 else: 98 project_id = uuid.uuid4() 99 self._project_uuid_source = ProjectUUIDSource.random 100 101 return project_id 102 [end of src/meltano/core/tracking/contexts/project.py] [start of src/meltano/core/tracking/schemas.py] 1 """Meltano Iglu schemas metadata & utilities.""" 2 3 from __future__ import annotations 4 5 from dataclasses import dataclass 6 7 DEFAULT_VENDOR = "com.meltano" 8 9 10 @dataclass 11 class IgluSchema: 12 """Dataclass to store the name, version, vendor, and URL for an Iglu schema.""" 13 14 name: str 15 version: str 16 vendor: str = DEFAULT_VENDOR 17 18 @property 19 def url(self) -> str: 20 """Construct an iglu schema URL. 21 22 Returns: 23 The URL to the schema. 24 """ 25 return f"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}" 26 27 28 CliContextSchema = IgluSchema("cli_context", "1-1-0") 29 CliEventSchema = IgluSchema("cli_event", "1-0-1") 30 BlockEventSchema = IgluSchema("block_event", "1-0-0") 31 EnvironmentContextSchema = IgluSchema("environment_context", "1-0-0") 32 ExceptionContextSchema = IgluSchema("exception_context", "1-0-0") 33 ExitEventSchema = IgluSchema("exit_event", "1-0-0") 34 PluginsContextSchema = IgluSchema("plugins_context", "1-0-0") 35 ProjectContextSchema = IgluSchema("project_context", "1-0-0") 36 TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0") 37 [end of src/meltano/core/tracking/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/meltano/core/tracking/contexts/project.py b/src/meltano/core/tracking/contexts/project.py --- a/src/meltano/core/tracking/contexts/project.py +++ b/src/meltano/core/tracking/contexts/project.py @@ -42,9 +42,10 @@ """ self.project = project self.settings_service = ProjectSettingsService(project) - self.send_anonymous_usage_stats = self.settings_service.get( - "send_anonymous_usage_stats", True - ) + ( + send_anonymous_usage_stats, + send_anonymous_usage_stats_metadata, + ) = self.settings_service.get_with_metadata("send_anonymous_usage_stats") super().__init__( ProjectContextSchema.url, @@ -58,6 +59,10 @@ if self.project.active_environment else None ), + "send_anonymous_usage_stats": send_anonymous_usage_stats, + "send_anonymous_usage_stats_source": ( + send_anonymous_usage_stats_metadata["source"].value + ), }, ) diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py --- a/src/meltano/core/tracking/schemas.py +++ b/src/meltano/core/tracking/schemas.py @@ -32,5 +32,5 @@ ExceptionContextSchema = IgluSchema("exception_context", "1-0-0") ExitEventSchema = IgluSchema("exit_event", "1-0-0") PluginsContextSchema = IgluSchema("plugins_context", "1-0-0") -ProjectContextSchema = IgluSchema("project_context", "1-0-0") +ProjectContextSchema = IgluSchema("project_context", "1-1-0") TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
{"golden_diff": "diff --git a/src/meltano/core/tracking/contexts/project.py b/src/meltano/core/tracking/contexts/project.py\n--- a/src/meltano/core/tracking/contexts/project.py\n+++ b/src/meltano/core/tracking/contexts/project.py\n@@ -42,9 +42,10 @@\n \"\"\"\n self.project = project\n self.settings_service = ProjectSettingsService(project)\n- self.send_anonymous_usage_stats = self.settings_service.get(\n- \"send_anonymous_usage_stats\", True\n- )\n+ (\n+ send_anonymous_usage_stats,\n+ send_anonymous_usage_stats_metadata,\n+ ) = self.settings_service.get_with_metadata(\"send_anonymous_usage_stats\")\n \n super().__init__(\n ProjectContextSchema.url,\n@@ -58,6 +59,10 @@\n if self.project.active_environment\n else None\n ),\n+ \"send_anonymous_usage_stats\": send_anonymous_usage_stats,\n+ \"send_anonymous_usage_stats_source\": (\n+ send_anonymous_usage_stats_metadata[\"source\"].value\n+ ),\n },\n )\n \ndiff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py\n--- a/src/meltano/core/tracking/schemas.py\n+++ b/src/meltano/core/tracking/schemas.py\n@@ -32,5 +32,5 @@\n ExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\n ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\n PluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\n-ProjectContextSchema = IgluSchema(\"project_context\", \"1-0-0\")\n+ProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\n TelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "issue": "[Feature]: Collect telemetry data about how `send_anonymous_usage_stats` was configured\nThe project context (and its schema) should be updated to include the key `send_anonymous_usage_stats_source` with the value `ProjectSettingService.get_with_metadata('send_anonymous_usage_stats')[1]['source'].value`, which can be one of the following strings:\r\n- `auto`\r\n- `config_override`\r\n- `db`\r\n- `default`\r\n- `dotenv`\r\n- `env`\r\n- `inherited`\r\n- `meltano_env`\r\n- `meltano_yml`\r\n\r\nCC @pnadolny13 @aaronsteers \n", "before_files": [{"content": "\"\"\"Project context for the Snowplow tracker.\"\"\"\n\nfrom __future__ import annotations\n\nimport uuid\nfrom enum import Enum, auto\n\nfrom cached_property import cached_property\nfrom snowplow_tracker import SelfDescribingJson\nfrom structlog.stdlib import get_logger\n\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.tracking.schemas import ProjectContextSchema\nfrom meltano.core.utils import hash_sha256\n\nlogger = get_logger(__name__)\n\n\nclass ProjectUUIDSource(Enum):\n \"\"\"The source of the `project_uuid` used for telemetry.\"\"\"\n\n # The UUID was explicitly provided in the config as the `project_id`.\n explicit = auto()\n\n # The UUID was derived by hashing the `project_id` in the config.\n derived = auto()\n\n # The UUID was randomly generated (UUID v4) since no `project_id` was configured.\n random = auto()\n\n\nclass ProjectContext(SelfDescribingJson):\n \"\"\"Tracking context for the Meltano project.\"\"\"\n\n def __init__(self, project: Project, client_id: uuid.UUID):\n \"\"\"Initialize a meltano tracking \"project\" context.\n\n Args:\n project: The Meltano project.\n client_id: The client ID from `analytics.json`.\n \"\"\"\n self.project = project\n self.settings_service = ProjectSettingsService(project)\n self.send_anonymous_usage_stats = self.settings_service.get(\n \"send_anonymous_usage_stats\", True\n )\n\n 
super().__init__(\n ProjectContextSchema.url,\n {\n \"context_uuid\": str(uuid.uuid4()),\n \"project_uuid\": str(self.project_uuid),\n \"project_uuid_source\": self.project_uuid_source.name,\n \"client_uuid\": str(client_id),\n \"environment_name_hash\": (\n hash_sha256(self.project.active_environment.name)\n if self.project.active_environment\n else None\n ),\n },\n )\n\n @property\n def project_uuid_source(self) -> ProjectUUIDSource:\n \"\"\"Obtain the source of the `project_uuid` used for telemetry.\n\n Returns:\n ProjectUUIDSource: The source of the `project_uuid` used for telemetry.\n \"\"\"\n # Ensure the `project_uuid` has been generated\n self.project_uuid # noqa: WPS428\n return self._project_uuid_source\n\n @cached_property\n def project_uuid(self) -> uuid.UUID:\n \"\"\"Obtain the `project_id` from the project config file.\n\n If it is not found (e.g. first time run), generate a valid v4 UUID, and and store it in the\n project config file.\n\n Returns:\n The project UUID.\n \"\"\"\n project_id_str = self.settings_service.get(\"project_id\")\n\n if project_id_str:\n try:\n # Project ID might already be a UUID\n project_id = uuid.UUID(project_id_str)\n except ValueError:\n # If the project ID is not a UUID, then we hash it, and use the hash to make a UUID\n project_id = uuid.UUID(hash_sha256(project_id_str)[::2])\n self._project_uuid_source = ProjectUUIDSource.derived\n else:\n self._project_uuid_source = ProjectUUIDSource.explicit\n else:\n project_id = uuid.uuid4()\n self._project_uuid_source = ProjectUUIDSource.random\n\n return project_id\n", "path": "src/meltano/core/tracking/contexts/project.py"}, {"content": "\"\"\"Meltano Iglu schemas metadata & utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nDEFAULT_VENDOR = \"com.meltano\"\n\n\n@dataclass\nclass IgluSchema:\n \"\"\"Dataclass to store the name, version, vendor, and URL for an Iglu schema.\"\"\"\n\n name: str\n version: str\n vendor: str = DEFAULT_VENDOR\n\n @property\n def url(self) -> str:\n \"\"\"Construct an iglu schema URL.\n\n Returns:\n The URL to the schema.\n \"\"\"\n return f\"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}\"\n\n\nCliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\nCliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\nBlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\nEnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\nExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\nExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\nPluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\nProjectContextSchema = IgluSchema(\"project_context\", \"1-0-0\")\nTelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "path": "src/meltano/core/tracking/schemas.py"}]}
2,022
423
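The meltano record above adds the setting's provenance to the project telemetry context. A condensed sketch of that lookup follows; it reuses only the imports and the `get_with_metadata` call visible in the record's diff, and the `usage_stats_fields` helper is an illustrative name rather than a function that exists in Meltano.

```python
from meltano.core.project import Project
from meltano.core.project_settings_service import ProjectSettingsService


def usage_stats_fields(project: Project) -> dict:
    settings_service = ProjectSettingsService(project)
    # get_with_metadata returns (value, metadata); metadata["source"] is an enum
    # whose .value is one of: auto, config_override, db, default, dotenv, env,
    # inherited, meltano_env, meltano_yml.
    value, metadata = settings_service.get_with_metadata("send_anonymous_usage_stats")
    return {
        "send_anonymous_usage_stats": value,
        "send_anonymous_usage_stats_source": metadata["source"].value,
    }
```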
gh_patches_debug_28571
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-5219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Poundland spider address parsing issue The addr:street_address field returned by the poundland.py spider is sometimes broken, giving results such as: `"addr:street_address": "5, 6, -, 5, 8, , T, a, f, f, , S, t, r, e, e, t"` The problem is caused by line 20 in the code: ` item["street_address"] = ", ".join(filter(None, store["address"].get("line")))` where is is assumed that "line" from the scraped JSON will be an array of values. But it is sometimes "line" is just a single string. When this happens, the string itself is split into individual characters, giving results like the one above. I guess that before applying that code we should test whether "line" is a single string. I don't think I know enough python to know the best way to fix this, and a quick Google suggests there may be a difference between Python 2 and Python 3 (which would make it difficult for me to test any solutions). </issue> <code> [start of locations/spiders/poundland.py] 1 import scrapy 2 3 from locations.dict_parser import DictParser 4 from locations.hours import OpeningHours 5 6 7 class PoundlandSpider(scrapy.Spider): 8 name = "poundland" 9 item_attributes = {"brand": "Poundland", "brand_wikidata": "Q1434528"} 10 start_urls = [ 11 "https://www.poundland.co.uk/rest/poundland/V1/locator/?searchCriteria[scope]=store-locator&searchCriteria[current_page]=1&searchCriteria[page_size]=10000" 12 ] 13 custom_settings = {"DEFAULT_REQUEST_HEADERS": {"Accept": "application/json"}} 14 15 def parse(self, response): 16 # We may have to handle pagination at some point 17 for store in response.json()["locations"]: 18 item = DictParser.parse(store) 19 20 item["street_address"] = ", ".join(filter(None, store["address"].get("line"))) 21 22 # "store_id" seems to be a better ref than "id" 23 item["ref"] = store.get("store_id") 24 item["website"] = "https://www.poundland.co.uk/store-finder/store_page/view/id/" + item["ref"] + "/" 25 26 oh = OpeningHours() 27 for rule in store["opening_hours"]: 28 if rule["hours"] == "Closed": 29 continue 30 open_time, close_time = rule["hours"].split(" - ") 31 oh.add_range(rule["day"][:2], open_time, close_time) 32 33 item["opening_hours"] = oh.as_opening_hours() 34 35 item["extras"] = {} 36 item["extras"]["atm"] = "yes" if store.get("atm") == "1" else "no" 37 item["extras"]["icestore"] = "yes" if store.get("icestore") == "1" else "no" 38 39 if store["is_pep_co_only"] == "1": 40 item["brand"] = "Pep&Co" 41 item["brand_wikidata"] = "Q24908166" 42 else: 43 if store.get("pepshopinshop") == "1": 44 # Pep and Poundland at this location 45 pep = item.copy() 46 47 pep["ref"] = pep["ref"] + "_pep" 48 49 pep["brand"] = "Pep&Co" 50 pep["brand_wikidata"] = "Q24908166" 51 52 pep["located_in"] = self.item_attributes["brand"] 53 pep["located_in_wikidata"] = self.item_attributes["brand_wikidata"] 54 55 yield pep 56 57 yield item 58 [end of locations/spiders/poundland.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/poundland.py b/locations/spiders/poundland.py --- a/locations/spiders/poundland.py +++ b/locations/spiders/poundland.py @@ -1,7 +1,9 @@ import scrapy +from locations.categories import Extras, apply_yes_no from locations.dict_parser import DictParser from locations.hours import OpeningHours +from locations.spiders.vapestore_gb import clean_address class PoundlandSpider(scrapy.Spider): @@ -17,7 +19,7 @@ for store in response.json()["locations"]: item = DictParser.parse(store) - item["street_address"] = ", ".join(filter(None, store["address"].get("line"))) + item["street_address"] = clean_address(store["address"].get("line")) # "store_id" seems to be a better ref than "id" item["ref"] = store.get("store_id") @@ -30,10 +32,9 @@ open_time, close_time = rule["hours"].split(" - ") oh.add_range(rule["day"][:2], open_time, close_time) - item["opening_hours"] = oh.as_opening_hours() + item["opening_hours"] = oh - item["extras"] = {} - item["extras"]["atm"] = "yes" if store.get("atm") == "1" else "no" + apply_yes_no(Extras.ATM, item, store.get("atm") == "1") item["extras"]["icestore"] = "yes" if store.get("icestore") == "1" else "no" if store["is_pep_co_only"] == "1":
{"golden_diff": "diff --git a/locations/spiders/poundland.py b/locations/spiders/poundland.py\n--- a/locations/spiders/poundland.py\n+++ b/locations/spiders/poundland.py\n@@ -1,7 +1,9 @@\n import scrapy\n \n+from locations.categories import Extras, apply_yes_no\n from locations.dict_parser import DictParser\n from locations.hours import OpeningHours\n+from locations.spiders.vapestore_gb import clean_address\n \n \n class PoundlandSpider(scrapy.Spider):\n@@ -17,7 +19,7 @@\n for store in response.json()[\"locations\"]:\n item = DictParser.parse(store)\n \n- item[\"street_address\"] = \", \".join(filter(None, store[\"address\"].get(\"line\")))\n+ item[\"street_address\"] = clean_address(store[\"address\"].get(\"line\"))\n \n # \"store_id\" seems to be a better ref than \"id\"\n item[\"ref\"] = store.get(\"store_id\")\n@@ -30,10 +32,9 @@\n open_time, close_time = rule[\"hours\"].split(\" - \")\n oh.add_range(rule[\"day\"][:2], open_time, close_time)\n \n- item[\"opening_hours\"] = oh.as_opening_hours()\n+ item[\"opening_hours\"] = oh\n \n- item[\"extras\"] = {}\n- item[\"extras\"][\"atm\"] = \"yes\" if store.get(\"atm\") == \"1\" else \"no\"\n+ apply_yes_no(Extras.ATM, item, store.get(\"atm\") == \"1\")\n item[\"extras\"][\"icestore\"] = \"yes\" if store.get(\"icestore\") == \"1\" else \"no\"\n \n if store[\"is_pep_co_only\"] == \"1\":\n", "issue": "Poundland spider address parsing issue\nThe addr:street_address field returned by the poundland.py spider is sometimes broken, giving results such as:\r\n`\"addr:street_address\": \"5, 6, -, 5, 8, , T, a, f, f, , S, t, r, e, e, t\"`\r\nThe problem is caused by line 20 in the code:\r\n` item[\"street_address\"] = \", \".join(filter(None, store[\"address\"].get(\"line\")))`\r\nwhere is is assumed that \"line\" from the scraped JSON will be an array of values. But it is sometimes \"line\" is just a single string. When this happens, the string itself is split into individual characters, giving results like the one above.\r\n\r\nI guess that before applying that code we should test whether \"line\" is a single string. 
I don't think I know enough python to know the best way to fix this, and a quick Google suggests there may be a difference between Python 2 and Python 3 (which would make it difficult for me to test any solutions).\n", "before_files": [{"content": "import scrapy\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass PoundlandSpider(scrapy.Spider):\n name = \"poundland\"\n item_attributes = {\"brand\": \"Poundland\", \"brand_wikidata\": \"Q1434528\"}\n start_urls = [\n \"https://www.poundland.co.uk/rest/poundland/V1/locator/?searchCriteria[scope]=store-locator&searchCriteria[current_page]=1&searchCriteria[page_size]=10000\"\n ]\n custom_settings = {\"DEFAULT_REQUEST_HEADERS\": {\"Accept\": \"application/json\"}}\n\n def parse(self, response):\n # We may have to handle pagination at some point\n for store in response.json()[\"locations\"]:\n item = DictParser.parse(store)\n\n item[\"street_address\"] = \", \".join(filter(None, store[\"address\"].get(\"line\")))\n\n # \"store_id\" seems to be a better ref than \"id\"\n item[\"ref\"] = store.get(\"store_id\")\n item[\"website\"] = \"https://www.poundland.co.uk/store-finder/store_page/view/id/\" + item[\"ref\"] + \"/\"\n\n oh = OpeningHours()\n for rule in store[\"opening_hours\"]:\n if rule[\"hours\"] == \"Closed\":\n continue\n open_time, close_time = rule[\"hours\"].split(\" - \")\n oh.add_range(rule[\"day\"][:2], open_time, close_time)\n\n item[\"opening_hours\"] = oh.as_opening_hours()\n\n item[\"extras\"] = {}\n item[\"extras\"][\"atm\"] = \"yes\" if store.get(\"atm\") == \"1\" else \"no\"\n item[\"extras\"][\"icestore\"] = \"yes\" if store.get(\"icestore\") == \"1\" else \"no\"\n\n if store[\"is_pep_co_only\"] == \"1\":\n item[\"brand\"] = \"Pep&Co\"\n item[\"brand_wikidata\"] = \"Q24908166\"\n else:\n if store.get(\"pepshopinshop\") == \"1\":\n # Pep and Poundland at this location\n pep = item.copy()\n\n pep[\"ref\"] = pep[\"ref\"] + \"_pep\"\n\n pep[\"brand\"] = \"Pep&Co\"\n pep[\"brand_wikidata\"] = \"Q24908166\"\n\n pep[\"located_in\"] = self.item_attributes[\"brand\"]\n pep[\"located_in_wikidata\"] = self.item_attributes[\"brand_wikidata\"]\n\n yield pep\n\n yield item\n", "path": "locations/spiders/poundland.py"}]}
1,443
379
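The poundland record above is a str-versus-list pitfall: `", ".join(...)` over a single string iterates its characters, which is exactly what produced "5, 6, -, 5, 8, ...". The real fix delegates to the project's existing `clean_address` helper, whose implementation is not shown in the record; the normaliser below is only a hypothetical stand-in showing how both shapes of the "line" field can be handled.

```python
def join_address_lines(line):
    # Hypothetical helper, not the clean_address used in the actual fix.
    if not line:
        return ""
    if isinstance(line, str):
        parts = [line]                   # a single address line
    else:
        parts = [p for p in line if p]   # an iterable of lines, dropping None/empty
    return ", ".join(p.strip() for p in parts)


print(join_address_lines("56-58 Taff Street"))            # 56-58 Taff Street
print(join_address_lines(["56-58 Taff Street", None]))    # 56-58 Taff Street
```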
gh_patches_debug_30382
rasdani/github-patches
git_diff
pytorch__audio-3
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need API for saving to file. Currently we only have a load function. But after training the network it would be great if we can save the generated tensor to a file. @soumith I think we can reuse a lot of code from this repo https://github.com/MattVitelli/GRUV </issue> <code> [start of torchaudio/__init__.py] 1 import torch 2 3 from cffi import FFI 4 ffi = FFI() 5 from ._ext import th_sox 6 7 def load(filename, out=None): 8 if out is not None: 9 assert torch.is_tensor(out) 10 assert not out.is_cuda 11 else: 12 out = torch.FloatTensor() 13 14 if isinstance(out, torch.FloatTensor): 15 func = th_sox.libthsox_Float_read_audio_file 16 elif isinstance(out, torch.DoubleTensor): 17 func = th_sox.libthsox_Double_read_audio_file 18 elif isinstance(out, torch.ByteTensor): 19 func = th_sox.libthsox_Byte_read_audio_file 20 elif isinstance(out, torch.CharTensor): 21 func = th_sox.libthsox_Char_read_audio_file 22 elif isinstance(out, torch.ShortTensor): 23 func = th_sox.libthsox_Short_read_audio_file 24 elif isinstance(out, torch.IntTensor): 25 func = th_sox.libthsox_Int_read_audio_file 26 elif isinstance(out, torch.LongTensor): 27 func = th_sox.libthsox_Long_read_audio_file 28 29 sample_rate_p = ffi.new('int*') 30 func(bytes(filename), out, sample_rate_p) 31 sample_rate = sample_rate_p[0] 32 return out, sample_rate 33 [end of torchaudio/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchaudio/__init__.py b/torchaudio/__init__.py --- a/torchaudio/__init__.py +++ b/torchaudio/__init__.py @@ -1,32 +1,40 @@ +import os + import torch from cffi import FFI + ffi = FFI() from ._ext import th_sox + +def check_input(src): + if not torch.is_tensor(src): + raise TypeError('Expected a tensor, got %s' % type(src)) + if not src.__module__ == 'torch': + raise TypeError('Expected a CPU based tensor, got %s' % type(src)) + + def load(filename, out=None): if out is not None: - assert torch.is_tensor(out) - assert not out.is_cuda + check_input(out) else: out = torch.FloatTensor() - - if isinstance(out, torch.FloatTensor): - func = th_sox.libthsox_Float_read_audio_file - elif isinstance(out, torch.DoubleTensor): - func = th_sox.libthsox_Double_read_audio_file - elif isinstance(out, torch.ByteTensor): - func = th_sox.libthsox_Byte_read_audio_file - elif isinstance(out, torch.CharTensor): - func = th_sox.libthsox_Char_read_audio_file - elif isinstance(out, torch.ShortTensor): - func = th_sox.libthsox_Short_read_audio_file - elif isinstance(out, torch.IntTensor): - func = th_sox.libthsox_Int_read_audio_file - elif isinstance(out, torch.LongTensor): - func = th_sox.libthsox_Long_read_audio_file - - sample_rate_p = ffi.new('int*') + typename = type(out).__name__.replace('Tensor', '') + func = getattr(th_sox, 'libthsox_{}_read_audio_file'.format(typename)) + sample_rate_p = ffi.new('int*') func(bytes(filename), out, sample_rate_p) sample_rate = sample_rate_p[0] return out, sample_rate + + +def save(filepath, src, sample_rate): + filename, extension = os.path.splitext(filepath) + if type(sample_rate) != int: + raise TypeError('Sample rate should be a integer') + + check_input(src) + typename = type(src).__name__.replace('Tensor', '') + func = getattr(th_sox, 'libthsox_{}_write_audio_file'.format(typename)) + + func(bytes(filepath), src, extension[1:], sample_rate)
{"golden_diff": "diff --git a/torchaudio/__init__.py b/torchaudio/__init__.py\n--- a/torchaudio/__init__.py\n+++ b/torchaudio/__init__.py\n@@ -1,32 +1,40 @@\n+import os\n+\n import torch\n \n from cffi import FFI\n+\n ffi = FFI()\n from ._ext import th_sox\n \n+\n+def check_input(src):\n+ if not torch.is_tensor(src):\n+ raise TypeError('Expected a tensor, got %s' % type(src))\n+ if not src.__module__ == 'torch':\n+ raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n+\n+\n def load(filename, out=None):\n if out is not None:\n- assert torch.is_tensor(out)\n- assert not out.is_cuda\n+ check_input(out)\n else:\n out = torch.FloatTensor()\n-\n- if isinstance(out, torch.FloatTensor):\n- func = th_sox.libthsox_Float_read_audio_file\n- elif isinstance(out, torch.DoubleTensor):\n- func = th_sox.libthsox_Double_read_audio_file\n- elif isinstance(out, torch.ByteTensor):\n- func = th_sox.libthsox_Byte_read_audio_file\n- elif isinstance(out, torch.CharTensor):\n- func = th_sox.libthsox_Char_read_audio_file\n- elif isinstance(out, torch.ShortTensor):\n- func = th_sox.libthsox_Short_read_audio_file\n- elif isinstance(out, torch.IntTensor):\n- func = th_sox.libthsox_Int_read_audio_file\n- elif isinstance(out, torch.LongTensor):\n- func = th_sox.libthsox_Long_read_audio_file\n- \n- sample_rate_p = ffi.new('int*') \n+ typename = type(out).__name__.replace('Tensor', '')\n+ func = getattr(th_sox, 'libthsox_{}_read_audio_file'.format(typename))\n+ sample_rate_p = ffi.new('int*')\n func(bytes(filename), out, sample_rate_p)\n sample_rate = sample_rate_p[0]\n return out, sample_rate\n+\n+\n+def save(filepath, src, sample_rate):\n+ filename, extension = os.path.splitext(filepath)\n+ if type(sample_rate) != int:\n+ raise TypeError('Sample rate should be a integer')\n+\n+ check_input(src)\n+ typename = type(src).__name__.replace('Tensor', '')\n+ func = getattr(th_sox, 'libthsox_{}_write_audio_file'.format(typename))\n+\n+ func(bytes(filepath), src, extension[1:], sample_rate)\n", "issue": "Need API for saving to file.\nCurrently we only have a load function. But after training the network it would be great if we can save the generated tensor to a file.\r\n\r\n@soumith I think we can reuse a lot of code from this repo https://github.com/MattVitelli/GRUV\n", "before_files": [{"content": "import torch\n\nfrom cffi import FFI\nffi = FFI()\nfrom ._ext import th_sox\n\ndef load(filename, out=None):\n if out is not None:\n assert torch.is_tensor(out)\n assert not out.is_cuda\n else:\n out = torch.FloatTensor()\n\n if isinstance(out, torch.FloatTensor):\n func = th_sox.libthsox_Float_read_audio_file\n elif isinstance(out, torch.DoubleTensor):\n func = th_sox.libthsox_Double_read_audio_file\n elif isinstance(out, torch.ByteTensor):\n func = th_sox.libthsox_Byte_read_audio_file\n elif isinstance(out, torch.CharTensor):\n func = th_sox.libthsox_Char_read_audio_file\n elif isinstance(out, torch.ShortTensor):\n func = th_sox.libthsox_Short_read_audio_file\n elif isinstance(out, torch.IntTensor):\n func = th_sox.libthsox_Int_read_audio_file\n elif isinstance(out, torch.LongTensor):\n func = th_sox.libthsox_Long_read_audio_file\n \n sample_rate_p = ffi.new('int*') \n func(bytes(filename), out, sample_rate_p)\n sample_rate = sample_rate_p[0]\n return out, sample_rate\n", "path": "torchaudio/__init__.py"}]}
933
580
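For the torchaudio record above, the golden diff pairs the existing `load` with a new `save(filepath, src, sample_rate)` that infers the format from the file extension and requires an integer sample rate. The round trip below follows that early CFFI-based signature only; current torchaudio releases expose a different API, and the file names are placeholders.

```python
import torchaudio

waveform, sample_rate = torchaudio.load("input.wav")   # CPU tensor plus int sample rate
torchaudio.save("copy.wav", waveform, sample_rate)      # ".wav" extension selects the encoder
```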
gh_patches_debug_24978
rasdani/github-patches
git_diff
chainer__chainer-310
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> split_axis.backward fails on incomplete gradients When there is a None in the grad_outputs, split_axis fails to backprop the incomplete gradients. </issue> <code> [start of chainer/functions/split_axis.py] 1 import collections 2 3 import numpy 4 5 from chainer import cuda 6 from chainer import function 7 from chainer.utils import type_check 8 9 10 _args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset' 11 _preamble = ''' 12 #define COPY(statement) \ 13 int l = i / (rdim * cdimy); \ 14 int c = i / rdim % cdimy + coffset; \ 15 int r = i % rdim; \ 16 int idx = r + rdim * (c + cdimx * l); \ 17 statement; 18 ''' 19 20 21 class SplitAxis(function.Function): 22 23 """Function that splits multiple arrays towards the specified axis.""" 24 25 def __init__(self, indices_or_sections, axis): 26 if not isinstance(indices_or_sections, (int, collections.Iterable)): 27 raise TypeError('indices_or_sections must be integer or 1-D array') 28 self.indices_or_sections = indices_or_sections 29 self.axis = axis 30 31 def check_type_forward(self, in_types): 32 type_check.expect(in_types.size() == 1) 33 type_check.expect(in_types[0].ndim >= self.axis) 34 35 if isinstance(self.indices_or_sections, collections.Iterable): 36 max_index = type_check.Variable( 37 self.indices_or_sections[-1], 'max_index') 38 type_check.expect(in_types[0].shape[self.axis] > max_index) 39 else: 40 sections = type_check.Variable( 41 self.indices_or_sections, 'sections') 42 type_check.expect(in_types[0].shape[self.axis] % sections == 0) 43 44 def forward_cpu(self, x): 45 if isinstance(self.indices_or_sections, collections.Iterable): 46 cdimx = x[0].shape[self.axis] 47 ind = list(self.indices_or_sections) 48 ind.append(cdimx) 49 prev_i = 0 50 for i in ind: 51 cdimy = max(0, min(i, cdimx) - prev_i) 52 if cdimy == 0: 53 raise ValueError('Not support if shape contains 0') 54 prev_i = i 55 return tuple(numpy.split(x[0], self.indices_or_sections, self.axis)) 56 57 def forward_gpu(self, x): 58 xshape = x[0].shape 59 self.cdimx = xshape[self.axis] 60 self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int) 61 62 if isinstance(self.indices_or_sections, collections.Iterable): 63 ind = list(self.indices_or_sections) 64 ind.append(self.cdimx) 65 else: 66 sec = self.indices_or_sections 67 if self.cdimx % sec: 68 raise ValueError( 69 'array split does not result in an equal division') 70 ind = numpy.arange(1, sec + 1) * (self.cdimx // sec) 71 ys = [] 72 kernel = cuda.elementwise( 73 _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble) 74 prev_i = 0 75 for i in ind: 76 cdimy = max(0, min(i, self.cdimx) - prev_i) 77 s = list(xshape) 78 s[self.axis] = cdimy 79 y = cuda.empty(tuple(s), dtype=x[0].dtype) 80 if cdimy == 0: 81 raise ValueError('Not support if shape contains 0') 82 kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i) 83 prev_i = i 84 ys.append(y) 85 return tuple(ys) 86 87 def backward_cpu(self, x, gys): 88 return numpy.concatenate(gys, axis=self.axis), 89 90 def backward_gpu(self, x, gys): 91 gx = cuda.empty_like(x[0]) 92 coffset = 0 93 kernel = cuda.elementwise( 94 _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble) 95 for gy in gys: 96 cdimy = gy.shape[self.axis] 97 if cdimy != 0: 98 kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset) 99 coffset += cdimy 100 return gx, 101 102 103 def split_axis(x, indices_or_sections, axis): 104 """Splits given variables along an axis. 
105 106 Args: 107 x (tuple of Variables): Variables to be split. 108 indices_or_sections (int or 1-D array): If this argument is an integer, 109 N, the array will be divided into N equal arrays along axis. 110 If it is a 1-D array of sorted integers, it 111 indicates the positions where the array is split. 112 axis (int): Axis that the input array is split along. 113 114 Returns: 115 ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects 116 if the number of outputs is more than 1 or 117 :class:`~chainer.Variable` otherwise. 118 119 .. note:: 120 This function raises ``ValueError`` if at least 121 one of the outputs is splitted to zero-size 122 (i.e. `axis`-th value of its shape is zero). 123 124 """ 125 return SplitAxis(indices_or_sections, axis)(x) 126 [end of chainer/functions/split_axis.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/split_axis.py b/chainer/functions/split_axis.py --- a/chainer/functions/split_axis.py +++ b/chainer/functions/split_axis.py @@ -1,6 +1,7 @@ import collections import numpy +import six from chainer import cuda from chainer import function @@ -85,14 +86,25 @@ return tuple(ys) def backward_cpu(self, x, gys): - return numpy.concatenate(gys, axis=self.axis), + if any(gy is None for gy in gys): + gx = numpy.zeros_like(x[0]) + gxs = numpy.split(gx, self.indices_or_sections, self.axis) + for gxi, gy in six.moves.zip(gxs, gys): + if gy is None: + continue + gxi[:] = gy + return gx, + else: + return numpy.concatenate(gys, axis=self.axis), def backward_gpu(self, x, gys): - gx = cuda.empty_like(x[0]) + gx = cuda.zeros_like(x[0]) coffset = 0 kernel = cuda.elementwise( _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble) for gy in gys: + if gy is None: + continue cdimy = gy.shape[self.axis] if cdimy != 0: kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)
{"golden_diff": "diff --git a/chainer/functions/split_axis.py b/chainer/functions/split_axis.py\n--- a/chainer/functions/split_axis.py\n+++ b/chainer/functions/split_axis.py\n@@ -1,6 +1,7 @@\n import collections\n \n import numpy\n+import six\n \n from chainer import cuda\n from chainer import function\n@@ -85,14 +86,25 @@\n return tuple(ys)\n \n def backward_cpu(self, x, gys):\n- return numpy.concatenate(gys, axis=self.axis),\n+ if any(gy is None for gy in gys):\n+ gx = numpy.zeros_like(x[0])\n+ gxs = numpy.split(gx, self.indices_or_sections, self.axis)\n+ for gxi, gy in six.moves.zip(gxs, gys):\n+ if gy is None:\n+ continue\n+ gxi[:] = gy\n+ return gx,\n+ else:\n+ return numpy.concatenate(gys, axis=self.axis),\n \n def backward_gpu(self, x, gys):\n- gx = cuda.empty_like(x[0])\n+ gx = cuda.zeros_like(x[0])\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble)\n for gy in gys:\n+ if gy is None:\n+ continue\n cdimy = gy.shape[self.axis]\n if cdimy != 0:\n kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)\n", "issue": "split_axis.backward fails on incomplete gradients\nWhen there is a None in the grad_outputs, split_axis fails to backprop the incomplete gradients.\n\n", "before_files": [{"content": "import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\n_args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimy); \\\n int c = i / rdim % cdimy + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimx * l); \\\n statement;\n'''\n\n\nclass SplitAxis(function.Function):\n\n \"\"\"Function that splits multiple arrays towards the specified axis.\"\"\"\n\n def __init__(self, indices_or_sections, axis):\n if not isinstance(indices_or_sections, (int, collections.Iterable)):\n raise TypeError('indices_or_sections must be integer or 1-D array')\n self.indices_or_sections = indices_or_sections\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].ndim >= self.axis)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n max_index = type_check.Variable(\n self.indices_or_sections[-1], 'max_index')\n type_check.expect(in_types[0].shape[self.axis] > max_index)\n else:\n sections = type_check.Variable(\n self.indices_or_sections, 'sections')\n type_check.expect(in_types[0].shape[self.axis] % sections == 0)\n\n def forward_cpu(self, x):\n if isinstance(self.indices_or_sections, collections.Iterable):\n cdimx = x[0].shape[self.axis]\n ind = list(self.indices_or_sections)\n ind.append(cdimx)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, cdimx) - prev_i)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n prev_i = i\n return tuple(numpy.split(x[0], self.indices_or_sections, self.axis))\n\n def forward_gpu(self, x):\n xshape = x[0].shape\n self.cdimx = xshape[self.axis]\n self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n ind = list(self.indices_or_sections)\n ind.append(self.cdimx)\n else:\n sec = self.indices_or_sections\n if self.cdimx % sec:\n raise ValueError(\n 'array split does not result in an equal division')\n ind = numpy.arange(1, sec + 1) * (self.cdimx // sec)\n ys = []\n kernel = cuda.elementwise(\n _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble)\n 
prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, self.cdimx) - prev_i)\n s = list(xshape)\n s[self.axis] = cdimy\n y = cuda.empty(tuple(s), dtype=x[0].dtype)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i)\n prev_i = i\n ys.append(y)\n return tuple(ys)\n\n def backward_cpu(self, x, gys):\n return numpy.concatenate(gys, axis=self.axis),\n\n def backward_gpu(self, x, gys):\n gx = cuda.empty_like(x[0])\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble)\n for gy in gys:\n cdimy = gy.shape[self.axis]\n if cdimy != 0:\n kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)\n coffset += cdimy\n return gx,\n\n\ndef split_axis(x, indices_or_sections, axis):\n \"\"\"Splits given variables along an axis.\n\n Args:\n x (tuple of Variables): Variables to be split.\n indices_or_sections (int or 1-D array): If this argument is an integer,\n N, the array will be divided into N equal arrays along axis.\n If it is a 1-D array of sorted integers, it\n indicates the positions where the array is split.\n axis (int): Axis that the input array is split along.\n\n Returns:\n ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects\n if the number of outputs is more than 1 or\n :class:`~chainer.Variable` otherwise.\n\n .. note::\n This function raises ``ValueError`` if at least\n one of the outputs is splitted to zero-size\n (i.e. `axis`-th value of its shape is zero).\n\n \"\"\"\n return SplitAxis(indices_or_sections, axis)(x)\n", "path": "chainer/functions/split_axis.py"}]}
1,977
349
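A minimal NumPy-only sketch of the idea behind the split_axis fix above: when some per-output gradients are missing (None), accumulate into a zero-initialized array instead of concatenating. The function name and signature here are invented for illustration and are not part of Chainer.

```python
import numpy as np

def merge_split_gradients(x_shape, indices_or_sections, axis, gys):
    """Rebuild the gradient of the split input from per-output gradients.

    `gys` may contain None for outputs that received no gradient; those
    slices stay zero, mirroring backward_cpu in the golden diff above.
    """
    if all(gy is not None for gy in gys):
        return np.concatenate(gys, axis=axis)
    gx = np.zeros(x_shape, dtype=np.float32)
    # numpy.split returns views into gx, so writing into a view fills gx in place.
    for gxi, gy in zip(np.split(gx, indices_or_sections, axis), gys):
        if gy is not None:
            gxi[:] = gy
    return gx

# Example: a (4, 6) input split into three (4, 2) outputs along axis 1,
# with the middle output's gradient missing.
gys = [np.ones((4, 2), np.float32), None, 2 * np.ones((4, 2), np.float32)]
gx = merge_split_gradients((4, 6), 3, 1, gys)
print(gx[0])  # [1. 1. 0. 0. 2. 2.]
```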
gh_patches_debug_61068
rasdani/github-patches
git_diff
Mailu__Mailu-719
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Alternatives useless after podop After updating to master to get all the up-to-date fixes it also moves postfix to use podop and it seems to no longer support receiving external mail from alternative domains 😢 Sending internal mail between alternatives works as expected but not with external mail, a "relay denied" message is shown in the logs and when checking the postfix podop views it looks like alternative is never mentioned. </issue> <code> [start of core/admin/mailu/internal/views/postfix.py] 1 from mailu import db, models 2 from mailu.internal import internal 3 4 import flask 5 6 7 @internal.route("/postfix/domain/<domain_name>") 8 def postfix_mailbox_domain(domain_name): 9 domain = models.Domain.query.get(domain_name) or flask.abort(404) 10 return flask.jsonify(domain.name) 11 12 13 @internal.route("/postfix/mailbox/<email>") 14 def postfix_mailbox_map(email): 15 user = models.User.query.get(email) or flask.abort(404) 16 return flask.jsonify(user.email) 17 18 19 @internal.route("/postfix/alias/<alias>") 20 def postfix_alias_map(alias): 21 localpart, domain = alias.split('@', 1) if '@' in alias else (None, alias) 22 alternative = models.Alternative.query.get(domain) 23 if alternative: 24 domain = alternative.domain_name 25 email = '{}@{}'.format(localpart, domain) 26 if localpart is None: 27 return flask.jsonify(domain) 28 else: 29 alias_obj = models.Alias.resolve(localpart, domain) 30 if alias_obj: 31 return flask.jsonify(",".join(alias_obj.destination)) 32 user_obj = models.User.query.get(email) 33 if user_obj: 34 return flask.jsonify(user_obj.destination) 35 return flask.abort(404) 36 37 38 @internal.route("/postfix/transport/<email>") 39 def postfix_transport(email): 40 localpart, domain = email.split('@', 1) if '@' in email else (None, email) 41 relay = models.Relay.query.get(domain) or flask.abort(404) 42 return flask.jsonify("smtp:[{}]".format(relay.smtp)) 43 44 45 @internal.route("/postfix/sender/<sender>") 46 def postfix_sender(sender): 47 """ Simply reject any sender that pretends to be from a local domain 48 """ 49 localpart, domain_name = sender.split('@', 1) if '@' in sender else (None, sender) 50 domain = models.Domain.query.get(domain_name) 51 alternative = models.Alternative.query.get(domain_name) 52 if domain or alternative: 53 return flask.jsonify("REJECT") 54 return flask.abort(404) 55 [end of core/admin/mailu/internal/views/postfix.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py --- a/core/admin/mailu/internal/views/postfix.py +++ b/core/admin/mailu/internal/views/postfix.py @@ -6,7 +6,9 @@ @internal.route("/postfix/domain/<domain_name>") def postfix_mailbox_domain(domain_name): - domain = models.Domain.query.get(domain_name) or flask.abort(404) + domain = models.Domain.query.get(domain_name) or \ + models.Alternative.query.get(domain_name) or \ + flask.abort(404) return flask.jsonify(domain.name)
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -6,7 +6,9 @@\n \n @internal.route(\"/postfix/domain/<domain_name>\")\n def postfix_mailbox_domain(domain_name):\n- domain = models.Domain.query.get(domain_name) or flask.abort(404)\n+ domain = models.Domain.query.get(domain_name) or \\\n+ models.Alternative.query.get(domain_name) or \\\n+ flask.abort(404)\n return flask.jsonify(domain.name)\n", "issue": "Alternatives useless after podop\nAfter updating to master to get all the up-to-date fixes it also moves postfix to use podop and it seems to no longer support receiving external mail from alternative domains \ud83d\ude22 \r\n\r\nSending internal mail between alternatives works as expected but not with external mail, a \"relay denied\" message is shown in the logs and when checking the postfix podop views it looks like alternative is never mentioned.\n", "before_files": [{"content": "from mailu import db, models\nfrom mailu.internal import internal\n\nimport flask\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n domain = models.Domain.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<alias>\")\ndef postfix_alias_map(alias):\n localpart, domain = alias.split('@', 1) if '@' in alias else (None, alias)\n alternative = models.Alternative.query.get(domain)\n if alternative:\n domain = alternative.domain_name\n email = '{}@{}'.format(localpart, domain)\n if localpart is None:\n return flask.jsonify(domain)\n else:\n alias_obj = models.Alias.resolve(localpart, domain)\n if alias_obj:\n return flask.jsonify(\",\".join(alias_obj.destination))\n user_obj = models.User.query.get(email)\n if user_obj:\n return flask.jsonify(user_obj.destination)\n return flask.abort(404)\n\n\[email protected](\"/postfix/transport/<email>\")\ndef postfix_transport(email):\n localpart, domain = email.split('@', 1) if '@' in email else (None, email)\n relay = models.Relay.query.get(domain) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/<sender>\")\ndef postfix_sender(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n localpart, domain_name = sender.split('@', 1) if '@' in sender else (None, sender)\n domain = models.Domain.query.get(domain_name)\n alternative = models.Alternative.query.get(domain_name)\n if domain or alternative:\n return flask.jsonify(\"REJECT\")\n return flask.abort(404)\n", "path": "core/admin/mailu/internal/views/postfix.py"}]}
1,179
141
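The relay-denied behaviour in the Mailu row above comes from postfix_mailbox_domain accepting only primary domains; the golden diff adds a fallback to the Alternative table. Below is a dictionary-based sketch of that lookup order, with invented sample data and no Flask or SQLAlchemy involved.

```python
# Plain dicts stand in for the Domain and Alternative tables; the names and
# sample values below are made up for illustration only.
PRIMARY_DOMAINS = {"example.com"}
ALTERNATIVE_DOMAINS = {"example.net": "example.com"}  # alternative -> primary

def postfix_mailbox_domain(domain_name):
    # Accept a name that is either a primary domain or a declared alternative;
    # otherwise behave like flask.abort(404).
    if domain_name in PRIMARY_DOMAINS or domain_name in ALTERNATIVE_DOMAINS:
        return domain_name
    raise LookupError("404: unknown mailbox domain")

print(postfix_mailbox_domain("example.com"))  # accepted before and after the fix
print(postfix_mailbox_domain("example.net"))  # only accepted with the fallback
```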
gh_patches_debug_34169
rasdani/github-patches
git_diff
conan-io__conan-center-index-253
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] catch2/2.9.2: Expected CMake scripts to be included in the package ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **catch2/2.9.2** I expected to have access to cmake scripts that are installed with Catch2. The helper scripts are set to be installed. https://github.com/conan-io/conan-center-index/blob/6a7ff72be4e6fa6362112459f7319f6e6e565a99/recipes/catch2/2.x.x/conanfile.py#L33 Then they are deleted during packaging. https://github.com/conan-io/conan-center-index/blob/6a7ff72be4e6fa6362112459f7319f6e6e565a99/recipes/catch2/2.x.x/conanfile.py#L51 Currently, I am using the older bincrafters package (catch2/2.5.0@bincrafters/stable) which still includes the CMake scripts. I would need to maintain my own conan package to use the newer version of Catch2. </issue> <code> [start of recipes/catch2/2.x.x/conanfile.py] 1 #!/usr/bin/env python 2 3 import os 4 5 from conans import ConanFile, CMake, tools 6 7 8 class ConanRecipe(ConanFile): 9 name = "catch2" 10 description = "A modern, C++-native, header-only, framework for unit-tests, TDD and BDD" 11 topics = ("conan", "catch2", "header-only", "unit-test", "tdd", "bdd") 12 homepage = "https://github.com/catchorg/Catch2" 13 url = "https://github.com/conan-io/conan-center-index" 14 license = "BSL-1.0" 15 16 settings = "os", "compiler", "build_type", "arch" 17 18 generators = "cmake" 19 20 _source_subfolder = "source_subfolder" 21 22 def source(self): 23 tools.get(**self.conan_data["sources"][self.version]) 24 extracted_dir = "Catch2-" + self.version 25 os.rename(extracted_dir, self._source_subfolder) 26 27 _build_subfolder = "build_subfolder" 28 29 def _configure_cmake(self): 30 cmake = CMake(self) 31 cmake.definitions["BUILD_TESTING"] = "OFF" 32 cmake.definitions["CATCH_INSTALL_DOCS"] = "OFF" 33 cmake.definitions["CATCH_INSTALL_HELPERS"] = "ON" 34 cmake.configure( 35 source_folder=self._source_subfolder, 36 build_folder=self._build_subfolder 37 ) 38 return cmake 39 40 def build(self): 41 cmake = self._configure_cmake() 42 cmake.build() 43 44 def package(self): 45 self.copy(pattern="LICENSE.txt", dst="licenses", 46 src=self._source_subfolder) 47 48 cmake = self._configure_cmake() 49 cmake.install() 50 51 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake")) 52 tools.rmdir(os.path.join(self.package_folder, "share")) 53 54 def package_id(self): 55 self.info.header_only() 56 [end of recipes/catch2/2.x.x/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/catch2/2.x.x/conanfile.py b/recipes/catch2/2.x.x/conanfile.py --- a/recipes/catch2/2.x.x/conanfile.py +++ b/recipes/catch2/2.x.x/conanfile.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - import os from conans import ConanFile, CMake, tools @@ -12,20 +10,16 @@ homepage = "https://github.com/catchorg/Catch2" url = "https://github.com/conan-io/conan-center-index" license = "BSL-1.0" - settings = "os", "compiler", "build_type", "arch" - generators = "cmake" - _source_subfolder = "source_subfolder" + _build_subfolder = "build_subfolder" def source(self): tools.get(**self.conan_data["sources"][self.version]) extracted_dir = "Catch2-" + self.version os.rename(extracted_dir, self._source_subfolder) - _build_subfolder = "build_subfolder" - def _configure_cmake(self): cmake = CMake(self) cmake.definitions["BUILD_TESTING"] = "OFF" @@ -42,14 +36,18 @@ cmake.build() def package(self): - self.copy(pattern="LICENSE.txt", dst="licenses", - src=self._source_subfolder) - + self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder) cmake = self._configure_cmake() cmake.install() - tools.rmdir(os.path.join(self.package_folder, "lib", "cmake")) tools.rmdir(os.path.join(self.package_folder, "share")) + for cmake_file in ["ParseAndAddCatchTests.cmake", "Catch.cmake"]: + self.copy(cmake_file, + src=os.path.join(self._source_subfolder, "contrib"), + dst=os.path.join("lib", "cmake", "Catch2")) def package_id(self): self.info.header_only() + + def package_info(self): + self.cpp_info.builddirs = [os.path.join("lib", "cmake", "Catch2")]
{"golden_diff": "diff --git a/recipes/catch2/2.x.x/conanfile.py b/recipes/catch2/2.x.x/conanfile.py\n--- a/recipes/catch2/2.x.x/conanfile.py\n+++ b/recipes/catch2/2.x.x/conanfile.py\n@@ -1,5 +1,3 @@\n-#!/usr/bin/env python\n-\n import os\n \n from conans import ConanFile, CMake, tools\n@@ -12,20 +10,16 @@\n homepage = \"https://github.com/catchorg/Catch2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"BSL-1.0\"\n-\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n-\n generators = \"cmake\"\n-\n _source_subfolder = \"source_subfolder\"\n+ _build_subfolder = \"build_subfolder\"\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"Catch2-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n \n- _build_subfolder = \"build_subfolder\"\n-\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"BUILD_TESTING\"] = \"OFF\"\n@@ -42,14 +36,18 @@\n cmake.build()\n \n def package(self):\n- self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\",\n- src=self._source_subfolder)\n-\n+ self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n-\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n+ for cmake_file in [\"ParseAndAddCatchTests.cmake\", \"Catch.cmake\"]:\n+ self.copy(cmake_file,\n+ src=os.path.join(self._source_subfolder, \"contrib\"),\n+ dst=os.path.join(\"lib\", \"cmake\", \"Catch2\"))\n \n def package_id(self):\n self.info.header_only()\n+\n+ def package_info(self):\n+ self.cpp_info.builddirs = [os.path.join(\"lib\", \"cmake\", \"Catch2\")]\n", "issue": "[package] catch2/2.9.2: Expected CMake scripts to be included in the package \n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **catch2/2.9.2**\r\n\r\nI expected to have access to cmake scripts that are installed with Catch2.\r\n\r\nThe helper scripts are set to be installed.\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/6a7ff72be4e6fa6362112459f7319f6e6e565a99/recipes/catch2/2.x.x/conanfile.py#L33\r\n\r\nThen they are deleted during packaging.\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/6a7ff72be4e6fa6362112459f7319f6e6e565a99/recipes/catch2/2.x.x/conanfile.py#L51\r\n\r\nCurrently, I am using the older bincrafters package (catch2/2.5.0@bincrafters/stable) which still includes the CMake scripts. 
I would need to maintain my own conan package to use the newer version of Catch2.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\n\nfrom conans import ConanFile, CMake, tools\n\n\nclass ConanRecipe(ConanFile):\n name = \"catch2\"\n description = \"A modern, C++-native, header-only, framework for unit-tests, TDD and BDD\"\n topics = (\"conan\", \"catch2\", \"header-only\", \"unit-test\", \"tdd\", \"bdd\")\n homepage = \"https://github.com/catchorg/Catch2\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"BSL-1.0\"\n\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n\n generators = \"cmake\"\n\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"Catch2-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n _build_subfolder = \"build_subfolder\"\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"BUILD_TESTING\"] = \"OFF\"\n cmake.definitions[\"CATCH_INSTALL_DOCS\"] = \"OFF\"\n cmake.definitions[\"CATCH_INSTALL_HELPERS\"] = \"ON\"\n cmake.configure(\n source_folder=self._source_subfolder,\n build_folder=self._build_subfolder\n )\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\",\n src=self._source_subfolder)\n\n cmake = self._configure_cmake()\n cmake.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/catch2/2.x.x/conanfile.py"}]}
1,349
509
gh_patches_debug_5
rasdani/github-patches
git_diff
freedomofpress__securedrop-1117
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update kernel module blacklist During an installation last week, we encountered an issue with the kernel module blacklist. The install was using the new generation of Intel NUCs ([NUC5i5RYK](http://www.amazon.com/dp/B00SD9ISIQ) and [NUC5i5RYH](http://www.amazon.com/dp/B00SD9IS1S/)). Unlike the previous generation of NUCs, which did not include wireless networking hardware by default, the new generation includes wireless networking hardware for Wifi and Bluetooth on the motherboard. This means that Ubuntu running on the servers not only loaded the high-level kernel modules for wifi and bluetooth support (`iwlwifi` and `bluetooth`), it also loaded modules necessary for support on the specific (included) hardware: `iwlmvm` and `btusb`. When the `remove kernel modules` Ansible role ran, it failed with an error because it could not remove the top-level modules without removing their dependencies first. A quickfix to get this working on the new hardware was to change `disabled_kernel_modules` in `group_vars/securedrop.yml` from: ``` yml disabled_kernel_modules: - bluetooth - iwlwifi ``` to: ``` yml disabled_kernel_modules: - btusb - bluetooth - iwlmvm - iwlwifi ``` The order of the modules is important! We need to make sure the the dependencies are removed prior to the target modules that depend on them. This list is also likely specific to the new generation of Intel NUCs. If we want to support a wider variety of hardware, we may want to try being smart about removing kernel modules and their dependencies, e.g. something akin to this technique from [Stack Exchange](https://askubuntu.com/questions/317230/how-can-i-temporarily-disable-a-kernel-module). Finally, we need to make sure this updated module blacklist still works on the old hardware as well. </issue> <code> [start of securedrop/version.py] 1 __version__ = '0.3.4' 2 [end of securedrop/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/version.py b/securedrop/version.py --- a/securedrop/version.py +++ b/securedrop/version.py @@ -1 +1 @@ -__version__ = '0.3.4' +__version__ = '0.3.5'
{"golden_diff": "diff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.3.4'\n+__version__ = '0.3.5'\n", "issue": "Update kernel module blacklist\nDuring an installation last week, we encountered an issue with the kernel module blacklist. The install was using the new generation of Intel NUCs ([NUC5i5RYK](http://www.amazon.com/dp/B00SD9ISIQ) and [NUC5i5RYH](http://www.amazon.com/dp/B00SD9IS1S/)). Unlike the previous generation of NUCs, which did not include wireless networking hardware by default, the new generation includes wireless networking hardware for Wifi and Bluetooth on the motherboard.\n\nThis means that Ubuntu running on the servers not only loaded the high-level kernel modules for wifi and bluetooth support (`iwlwifi` and `bluetooth`), it also loaded modules necessary for support on the specific (included) hardware: `iwlmvm` and `btusb`. When the `remove kernel modules` Ansible role ran, it failed with an error because it could not remove the top-level modules without removing their dependencies first.\n\nA quickfix to get this working on the new hardware was to change `disabled_kernel_modules` in `group_vars/securedrop.yml` from:\n\n``` yml\ndisabled_kernel_modules:\n - bluetooth\n - iwlwifi\n```\n\nto:\n\n``` yml\ndisabled_kernel_modules:\n - btusb\n - bluetooth\n - iwlmvm\n - iwlwifi\n```\n\nThe order of the modules is important! We need to make sure the the dependencies are removed prior to the target modules that depend on them.\n\nThis list is also likely specific to the new generation of Intel NUCs. If we want to support a wider variety of hardware, we may want to try being smart about removing kernel modules and their dependencies, e.g. something akin to this technique from [Stack Exchange](https://askubuntu.com/questions/317230/how-can-i-temporarily-disable-a-kernel-module).\n\nFinally, we need to make sure this updated module blacklist still works on the old hardware as well.\n\n", "before_files": [{"content": "__version__ = '0.3.4'\n", "path": "securedrop/version.py"}]}
964
63
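The kernel-module issue above hinges on ordering: dependent modules such as btusb and iwlmvm must be removed before bluetooth and iwlwifi. Here is a small, hypothetical helper that produces such an ordering from a dependency map; it is not part of SecureDrop's Ansible role, and only the module names are taken from the issue.

```python
def removal_order(targets, dependents):
    """Return modules in an order safe for unloading/blacklisting.

    `dependents` maps a module to the modules that depend on it, which must
    therefore be removed first.
    """
    order = []

    def visit(mod):
        if mod in order:
            return
        for child in dependents.get(mod, []):
            visit(child)
        order.append(mod)

    for mod in targets:
        visit(mod)
    return order

dependents = {"bluetooth": ["btusb"], "iwlwifi": ["iwlmvm"]}
print(removal_order(["bluetooth", "iwlwifi"], dependents))
# ['btusb', 'bluetooth', 'iwlmvm', 'iwlwifi']  -- matches the issue's quickfix
```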
gh_patches_debug_6718
rasdani/github-patches
git_diff
getmoto__moto-556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix S3 issues with botocore 1.3.29 botocore 1.3.29 breaks s3 in tests </issue> <code> [start of moto/__init__.py] 1 from __future__ import unicode_literals 2 import logging 3 logging.getLogger('boto').setLevel(logging.CRITICAL) 4 5 __title__ = 'moto' 6 __version__ = '0.4.22' 7 8 from .autoscaling import mock_autoscaling # flake8: noqa 9 from .awslambda import mock_lambda # flake8: noqa 10 from .cloudformation import mock_cloudformation # flake8: noqa 11 from .cloudwatch import mock_cloudwatch # flake8: noqa 12 from .datapipeline import mock_datapipeline # flake8: noqa 13 from .dynamodb import mock_dynamodb # flake8: noqa 14 from .dynamodb2 import mock_dynamodb2 # flake8: noqa 15 from .ec2 import mock_ec2 # flake8: noqa 16 from .ecs import mock_ecs # flake8: noqa 17 from .elb import mock_elb # flake8: noqa 18 from .emr import mock_emr # flake8: noqa 19 from .glacier import mock_glacier # flake8: noqa 20 from .iam import mock_iam # flake8: noqa 21 from .kinesis import mock_kinesis # flake8: noqa 22 from .kms import mock_kms # flake8: noqa 23 from .rds import mock_rds # flake8: noqa 24 from .rds2 import mock_rds2 # flake8: noqa 25 from .redshift import mock_redshift # flake8: noqa 26 from .s3 import mock_s3 # flake8: noqa 27 from .s3bucket_path import mock_s3bucket_path # flake8: noqa 28 from .ses import mock_ses # flake8: noqa 29 from .sns import mock_sns # flake8: noqa 30 from .sqs import mock_sqs # flake8: noqa 31 from .sts import mock_sts # flake8: noqa 32 from .route53 import mock_route53 # flake8: noqa 33 from .swf import mock_swf # flake8: noqa 34 [end of moto/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/moto/__init__.py b/moto/__init__.py --- a/moto/__init__.py +++ b/moto/__init__.py @@ -31,3 +31,13 @@ from .sts import mock_sts # flake8: noqa from .route53 import mock_route53 # flake8: noqa from .swf import mock_swf # flake8: noqa + + +try: + # Need to monkey-patch botocore requests back to underlying urllib3 classes + from botocore.awsrequest import HTTPSConnectionPool, HTTPConnectionPool, HTTPConnection, VerifiedHTTPSConnection +except ImportError: + pass +else: + HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection + HTTPConnectionPool.ConnectionCls = HTTPConnection
{"golden_diff": "diff --git a/moto/__init__.py b/moto/__init__.py\n--- a/moto/__init__.py\n+++ b/moto/__init__.py\n@@ -31,3 +31,13 @@\n from .sts import mock_sts # flake8: noqa\n from .route53 import mock_route53 # flake8: noqa\n from .swf import mock_swf # flake8: noqa\n+\n+\n+try:\n+ # Need to monkey-patch botocore requests back to underlying urllib3 classes\n+ from botocore.awsrequest import HTTPSConnectionPool, HTTPConnectionPool, HTTPConnection, VerifiedHTTPSConnection\n+except ImportError:\n+ pass\n+else:\n+ HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection\n+ HTTPConnectionPool.ConnectionCls = HTTPConnection\n", "issue": "Fix S3 issues with botocore 1.3.29\nbotocore 1.3.29 breaks s3 in tests\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport logging\nlogging.getLogger('boto').setLevel(logging.CRITICAL)\n\n__title__ = 'moto'\n__version__ = '0.4.22'\n\nfrom .autoscaling import mock_autoscaling # flake8: noqa\nfrom .awslambda import mock_lambda # flake8: noqa\nfrom .cloudformation import mock_cloudformation # flake8: noqa\nfrom .cloudwatch import mock_cloudwatch # flake8: noqa\nfrom .datapipeline import mock_datapipeline # flake8: noqa\nfrom .dynamodb import mock_dynamodb # flake8: noqa\nfrom .dynamodb2 import mock_dynamodb2 # flake8: noqa\nfrom .ec2 import mock_ec2 # flake8: noqa\nfrom .ecs import mock_ecs # flake8: noqa\nfrom .elb import mock_elb # flake8: noqa\nfrom .emr import mock_emr # flake8: noqa\nfrom .glacier import mock_glacier # flake8: noqa\nfrom .iam import mock_iam # flake8: noqa\nfrom .kinesis import mock_kinesis # flake8: noqa\nfrom .kms import mock_kms # flake8: noqa\nfrom .rds import mock_rds # flake8: noqa\nfrom .rds2 import mock_rds2 # flake8: noqa\nfrom .redshift import mock_redshift # flake8: noqa\nfrom .s3 import mock_s3 # flake8: noqa\nfrom .s3bucket_path import mock_s3bucket_path # flake8: noqa\nfrom .ses import mock_ses # flake8: noqa\nfrom .sns import mock_sns # flake8: noqa\nfrom .sqs import mock_sqs # flake8: noqa\nfrom .sts import mock_sts # flake8: noqa\nfrom .route53 import mock_route53 # flake8: noqa\nfrom .swf import mock_swf # flake8: noqa\n", "path": "moto/__init__.py"}]}
1,095
181
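The moto fix above uses a guarded monkey-patch so that `import moto` keeps working whether or not a compatible botocore is installed. The snippet below annotates that pattern; it is safe to run even without botocore, because a failed import simply skips the patch, and whether these attribute names exist depends on the installed botocore version.

```python
try:
    # Restore botocore's connection classes to the underlying urllib3 ones so
    # that mocked responses are served instead of real HTTP requests.
    from botocore.awsrequest import (
        HTTPSConnectionPool,
        HTTPConnectionPool,
        HTTPConnection,
        VerifiedHTTPSConnection,
    )
except ImportError:
    pass  # botocore missing or a version without these names: nothing to patch
else:
    HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
    HTTPConnectionPool.ConnectionCls = HTTPConnection
```

Keeping the patch in a try/except/else block is what makes the import side-effect free across botocore versions: the else branch runs only when every name imported cleanly.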
gh_patches_debug_4681
rasdani/github-patches
git_diff
awslabs__gluonts-1159
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Multiprocessing hangs when num_workers > len(dataset) ## Description I'm trying to serialize a predictor trained on multiple cores. When calling the `serialize` method nothing happens. Running the same code, but without specifying `num_workers`, it works as expected. ## To Reproduce ```python from pathlib import Path from typing import Optional from gluonts.dataset.multivariate_grouper import MultivariateGrouper from gluonts.dataset.common import TrainDatasets from gluonts.model.gpvar import GPVAREstimator from gluonts.dataset.repository.datasets import get_dataset from gluonts.mx.trainer import Trainer def load_multivariate_dataset(dataset_name: str, target_dim: Optional[int] = None): ds = get_dataset(dataset_name) if target_dim is None: target_dim = len(ds.train) grouper = MultivariateGrouper(max_target_dim=target_dim) meta = ds.metadata meta.feat_static_cat[0].cardinality = target_dim return (TrainDatasets( metadata=meta, train=grouper(ds.train), test=grouper(ds.test) ), target_dim) ds, target_dim = load_multivariate_dataset("exchange_rate") metadata = ds.metadata estimator = GPVAREstimator( prediction_length=metadata.prediction_length, freq=metadata.freq, target_dim=target_dim, trainer=Trainer( epochs=2, num_batches_per_epoch=10, batch_size=8, ), ) predictor = estimator.train(training_data=ds.train, num_workers=2) predictor.serialize(Path("/tmp")) ``` ## Error message or code output Nothing happens. ## Environment - Operating system: Mac OSX 10.15.7 - Python version: 3.6.12 - GluonTS version: 0.6.0 - MXNet version: 1.7.0post1 </issue> <code> [start of src/gluonts/itertools.py] 1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). 4 # You may not use this file except in compliance with the License. 5 # A copy of the License is located at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # or in the "license" file accompanying this file. This file is distributed 10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 # express or implied. See the License for the specific language governing 12 # permissions and limitations under the License. 13 14 from typing import Iterable, Iterator, List, TypeVar 15 import itertools 16 import random 17 18 T = TypeVar("T") 19 20 21 def cyclic(it): 22 """Like `itertools.cycle`, but does not store the data.""" 23 24 while True: 25 yield from it 26 27 28 def batcher(iterable: Iterable[T], batch_size: int) -> Iterator[List[T]]: 29 """Groups elements from `iterable` into batches of size `batch_size`. 30 31 >>> list(batcher("ABCDEFG", 3)) 32 [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] 33 34 Unlike the grouper proposed in the documentation of itertools, `batcher` 35 doesn't fill up missing values. 36 """ 37 it: Iterator[T] = iter(iterable) 38 39 def get_batch(): 40 return list(itertools.islice(it, batch_size)) 41 42 # has an empty list so that we have a 2D array for sure 43 return iter(get_batch, []) 44 45 46 class cached(Iterable): 47 """ 48 An iterable wrapper, which caches values in a list the first time it is iterated. 49 50 The primary use-case for this is to avoid re-computing the element of the sequence, 51 in case the inner iterable does it on demand. 52 53 This should be used to wrap deterministic iterables, i.e. 
iterables where the data 54 generation process is not random, and that yield the same elements when iterated 55 multiple times. 56 """ 57 58 def __init__(self, iterable: Iterable) -> None: 59 self.iterable = iterable 60 self.cache = None 61 62 def __iter__(self): 63 if self.cache is None: 64 self.cache = [] 65 for element in self.iterable: 66 yield element 67 self.cache.append(element) 68 else: 69 yield from self.cache 70 71 72 def pseudo_shuffled(iterator: Iterator, shuffle_buffer_length: int): 73 """ 74 An iterator that yields item from a given iterator in a pseudo-shuffled order. 75 """ 76 shuffle_buffer = [] 77 78 for element in iterator: 79 shuffle_buffer.append(element) 80 if len(shuffle_buffer) >= shuffle_buffer_length: 81 yield shuffle_buffer.pop(random.randrange(len(shuffle_buffer))) 82 83 while shuffle_buffer: 84 yield shuffle_buffer.pop(random.randrange(len(shuffle_buffer))) 85 [end of src/gluonts/itertools.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/gluonts/itertools.py b/src/gluonts/itertools.py --- a/src/gluonts/itertools.py +++ b/src/gluonts/itertools.py @@ -21,8 +21,13 @@ def cyclic(it): """Like `itertools.cycle`, but does not store the data.""" + at_least_one = False while True: - yield from it + for el in it: + at_least_one = True + yield el + if not at_least_one: + break def batcher(iterable: Iterable[T], batch_size: int) -> Iterator[List[T]]:
{"golden_diff": "diff --git a/src/gluonts/itertools.py b/src/gluonts/itertools.py\n--- a/src/gluonts/itertools.py\n+++ b/src/gluonts/itertools.py\n@@ -21,8 +21,13 @@\n def cyclic(it):\n \"\"\"Like `itertools.cycle`, but does not store the data.\"\"\"\n \n+ at_least_one = False\n while True:\n- yield from it\n+ for el in it:\n+ at_least_one = True\n+ yield el\n+ if not at_least_one:\n+ break\n \n \n def batcher(iterable: Iterable[T], batch_size: int) -> Iterator[List[T]]:\n", "issue": "Multiprocessing hangs when num_workers > len(dataset)\n## Description\r\nI'm trying to serialize a predictor trained on multiple cores. When calling the `serialize` method nothing happens.\r\nRunning the same code, but without specifying `num_workers`, it works as expected.\r\n\r\n## To Reproduce\r\n\r\n```python\r\nfrom pathlib import Path\r\nfrom typing import Optional\r\n\r\nfrom gluonts.dataset.multivariate_grouper import MultivariateGrouper\r\nfrom gluonts.dataset.common import TrainDatasets\r\nfrom gluonts.model.gpvar import GPVAREstimator\r\nfrom gluonts.dataset.repository.datasets import get_dataset\r\nfrom gluonts.mx.trainer import Trainer\r\n\r\n\r\ndef load_multivariate_dataset(dataset_name: str, target_dim: Optional[int] = None):\r\n ds = get_dataset(dataset_name)\r\n\r\n if target_dim is None:\r\n target_dim = len(ds.train)\r\n\r\n grouper = MultivariateGrouper(max_target_dim=target_dim)\r\n\r\n meta = ds.metadata\r\n meta.feat_static_cat[0].cardinality = target_dim\r\n\r\n return (TrainDatasets(\r\n metadata=meta,\r\n train=grouper(ds.train),\r\n test=grouper(ds.test)\r\n ), target_dim)\r\n\r\n\r\nds, target_dim = load_multivariate_dataset(\"exchange_rate\")\r\nmetadata = ds.metadata\r\n\r\nestimator = GPVAREstimator(\r\n prediction_length=metadata.prediction_length,\r\n freq=metadata.freq,\r\n target_dim=target_dim,\r\n trainer=Trainer(\r\n epochs=2,\r\n num_batches_per_epoch=10,\r\n batch_size=8,\r\n ),\r\n)\r\n\r\npredictor = estimator.train(training_data=ds.train, num_workers=2)\r\n\r\npredictor.serialize(Path(\"/tmp\"))\r\n\r\n```\r\n\r\n## Error message or code output\r\nNothing happens.\r\n\r\n\r\n## Environment\r\n- Operating system: Mac OSX 10.15.7\r\n- Python version: 3.6.12\r\n- GluonTS version: 0.6.0\r\n- MXNet version: 1.7.0post1\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Iterable, Iterator, List, TypeVar\nimport itertools\nimport random\n\nT = TypeVar(\"T\")\n\n\ndef cyclic(it):\n \"\"\"Like `itertools.cycle`, but does not store the data.\"\"\"\n\n while True:\n yield from it\n\n\ndef batcher(iterable: Iterable[T], batch_size: int) -> Iterator[List[T]]:\n \"\"\"Groups elements from `iterable` into batches of size `batch_size`.\n\n >>> list(batcher(\"ABCDEFG\", 3))\n [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]\n\n Unlike the grouper proposed in the documentation of itertools, `batcher`\n doesn't fill up missing values.\n \"\"\"\n it: Iterator[T] = iter(iterable)\n\n def get_batch():\n return list(itertools.islice(it, batch_size))\n\n # has an empty list so that we have a 2D array for sure\n return iter(get_batch, [])\n\n\nclass cached(Iterable):\n \"\"\"\n An iterable wrapper, which caches values in a list the first time it is iterated.\n\n The primary use-case for this is to avoid re-computing the element of the sequence,\n in case the inner iterable does it on demand.\n\n This should be used to wrap deterministic iterables, i.e. iterables where the data\n generation process is not random, and that yield the same elements when iterated\n multiple times.\n \"\"\"\n\n def __init__(self, iterable: Iterable) -> None:\n self.iterable = iterable\n self.cache = None\n\n def __iter__(self):\n if self.cache is None:\n self.cache = []\n for element in self.iterable:\n yield element\n self.cache.append(element)\n else:\n yield from self.cache\n\n\ndef pseudo_shuffled(iterator: Iterator, shuffle_buffer_length: int):\n \"\"\"\n An iterator that yields item from a given iterator in a pseudo-shuffled order.\n \"\"\"\n shuffle_buffer = []\n\n for element in iterator:\n shuffle_buffer.append(element)\n if len(shuffle_buffer) >= shuffle_buffer_length:\n yield shuffle_buffer.pop(random.randrange(len(shuffle_buffer)))\n\n while shuffle_buffer:\n yield shuffle_buffer.pop(random.randrange(len(shuffle_buffer)))\n", "path": "src/gluonts/itertools.py"}]}
1,748
152
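The hang in the gluonts row above comes from `cyclic` spinning forever once a worker receives an empty shard, which happens when num_workers exceeds the number of series. The patched generator breaks out after a full pass that yielded nothing; below is a self-contained sketch with invented demo values.

```python
import itertools

def cyclic(it):
    """Like itertools.cycle, but re-iterates `it` instead of storing it,
    and terminates if `it` turns out to be empty."""
    at_least_one = False
    while True:
        for el in it:
            at_least_one = True
            yield el
        if not at_least_one:
            break

print(list(itertools.islice(cyclic([1, 2, 3]), 7)))  # [1, 2, 3, 1, 2, 3, 1]
print(list(cyclic([])))  # [] -- the unpatched `while True: yield from it` would hang here
```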
gh_patches_debug_149
rasdani/github-patches
git_diff
apache__tvm-6399
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `import tvm` now requires pytest With the merge of #6331, `import tvm` now requires pytest. I created this issue just to check whether this is something intentional or something that we want to fix. The chain from `import tvm` to `import pytest` happens due to the `from .import testing` on `python/tvm/__init__.py`. There is nothing actually done with that import. https://github.com/apache/incubator-tvm/blob/a4ebb16ed76bfea4ce4eed7be7ea73d4a01027e2/python/tvm/__init__.py#L53-L56 Within `python/tvm/testing.py` then there is the `import pytest`. I was thinking that we might want to remove these lines from `__init__.py`, so that we don't load `tvm.testing` and will only import it when required. I'm happy to submit a PR removing those lines, in case there is an understanding that it makes sense. cc @tqchen </issue> <code> [start of python/tvm/__init__.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 # pylint: disable=redefined-builtin, wildcard-import 18 """TVM: Open Deep Learning Compiler Stack.""" 19 import multiprocessing 20 import sys 21 import traceback 22 23 # top-level alias 24 # tvm._ffi 25 from ._ffi.base import TVMError, __version__ 26 from ._ffi.runtime_ctypes import DataTypeCode, DataType 27 from ._ffi import register_object, register_func, register_extension, get_global_func 28 29 # top-level alias 30 # tvm.runtime 31 from .runtime.object import Object 32 from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl 33 from .runtime.ndarray import vpi, rocm, ext_dev, micro_dev, hexagon 34 from .runtime import ndarray as nd 35 36 # tvm.error 37 from . import error 38 39 # tvm.ir 40 from .ir import IRModule 41 from .ir import transform 42 from .ir import container 43 from . import ir 44 45 # tvm.tir 46 from . import tir 47 48 # tvm.target 49 from . import target 50 51 # tvm.te 52 from . import te 53 54 # tvm.testing 55 from . import testing 56 57 # tvm.driver 58 from .driver import build, lower 59 60 # tvm.parser 61 from . import parser 62 63 # tvm tir hybrid script 64 from . import hybrid 65 66 # others 67 from . import arith 68 69 # support infra 70 from . 
import support 71 72 # Contrib initializers 73 from .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel 74 75 76 def tvm_wrap_excepthook(exception_hook): 77 """Wrap given excepthook with TVM additional work.""" 78 79 def wrapper(exctype, value, trbk): 80 """Clean subprocesses when TVM is interrupted.""" 81 exception_hook(exctype, value, trbk) 82 if hasattr(multiprocessing, 'active_children'): 83 # pylint: disable=not-callable 84 for p in multiprocessing.active_children(): 85 p.terminate() 86 87 return wrapper 88 89 90 sys.excepthook = tvm_wrap_excepthook(sys.excepthook) 91 [end of python/tvm/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -51,9 +51,6 @@ # tvm.te from . import te -# tvm.testing -from . import testing - # tvm.driver from .driver import build, lower
{"golden_diff": "diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py\n--- a/python/tvm/__init__.py\n+++ b/python/tvm/__init__.py\n@@ -51,9 +51,6 @@\n # tvm.te\n from . import te\n \n-# tvm.testing\n-from . import testing\n-\n # tvm.driver\n from .driver import build, lower\n", "issue": "`import tvm` now requires pytest\nWith the merge of #6331, `import tvm` now requires pytest. I created this issue just to check whether this is something intentional or something that we want to fix.\r\n\r\nThe chain from `import tvm` to `import pytest` happens due to the `from .import testing` on `python/tvm/__init__.py`. There is nothing actually done with that import.\r\n\r\nhttps://github.com/apache/incubator-tvm/blob/a4ebb16ed76bfea4ce4eed7be7ea73d4a01027e2/python/tvm/__init__.py#L53-L56\r\n\r\nWithin `python/tvm/testing.py` then there is the `import pytest`. I was thinking that we might want to remove these lines from `__init__.py`, so that we don't load `tvm.testing` and will only import it when required. I'm happy to submit a PR removing those lines, in case there is an understanding that it makes sense.\r\n\r\ncc @tqchen \n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=redefined-builtin, wildcard-import\n\"\"\"TVM: Open Deep Learning Compiler Stack.\"\"\"\nimport multiprocessing\nimport sys\nimport traceback\n\n# top-level alias\n# tvm._ffi\nfrom ._ffi.base import TVMError, __version__\nfrom ._ffi.runtime_ctypes import DataTypeCode, DataType\nfrom ._ffi import register_object, register_func, register_extension, get_global_func\n\n# top-level alias\n# tvm.runtime\nfrom .runtime.object import Object\nfrom .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl\nfrom .runtime.ndarray import vpi, rocm, ext_dev, micro_dev, hexagon\nfrom .runtime import ndarray as nd\n\n# tvm.error\nfrom . import error\n\n# tvm.ir\nfrom .ir import IRModule\nfrom .ir import transform\nfrom .ir import container\nfrom . import ir\n\n# tvm.tir\nfrom . import tir\n\n# tvm.target\nfrom . import target\n\n# tvm.te\nfrom . import te\n\n# tvm.testing\nfrom . import testing\n\n# tvm.driver\nfrom .driver import build, lower\n\n# tvm.parser\nfrom . import parser\n\n# tvm tir hybrid script\nfrom . import hybrid\n\n# others\nfrom . import arith\n\n# support infra\nfrom . 
import support\n\n# Contrib initializers\nfrom .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel\n\n\ndef tvm_wrap_excepthook(exception_hook):\n \"\"\"Wrap given excepthook with TVM additional work.\"\"\"\n\n def wrapper(exctype, value, trbk):\n \"\"\"Clean subprocesses when TVM is interrupted.\"\"\"\n exception_hook(exctype, value, trbk)\n if hasattr(multiprocessing, 'active_children'):\n # pylint: disable=not-callable\n for p in multiprocessing.active_children():\n p.terminate()\n\n return wrapper\n\n\nsys.excepthook = tvm_wrap_excepthook(sys.excepthook)\n", "path": "python/tvm/__init__.py"}]}
1,562
88
gh_patches_debug_1023
rasdani/github-patches
git_diff
pyca__cryptography-4037
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in HKDF? I think the computation of [`max_length`](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L70) in `src/cryptography/hazmat/primitives/kdf/hkdf.py` is wrong. [RFC5869](https://tools.ietf.org/html/rfc5869) states on page 3 that the input `L` of the HKDF-Expand function describes the "length of output keying material in octets (<= 255*HashLen)". An octet consists of 8 bit. Currently, `max_length` is computed as: ``` max_length = 255 * (algorithm.digest_size // 8) ``` The problem is, that `algorithm.digest_size` returns the size of the digest in bytes. (There are 8 bits per byte). Therefore, the division by 8 is wrong, and thus, `max_length` is unnecessarily small. (same applies for the computation of `salt` as well ([line 33](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L33)), in the case where `salt is None`) </issue> <code> [start of src/cryptography/hazmat/primitives/kdf/hkdf.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import six 8 9 from cryptography import utils 10 from cryptography.exceptions import ( 11 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons 12 ) 13 from cryptography.hazmat.backends.interfaces import HMACBackend 14 from cryptography.hazmat.primitives import constant_time, hmac 15 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction 16 17 18 @utils.register_interface(KeyDerivationFunction) 19 class HKDF(object): 20 def __init__(self, algorithm, length, salt, info, backend): 21 if not isinstance(backend, HMACBackend): 22 raise UnsupportedAlgorithm( 23 "Backend object does not implement HMACBackend.", 24 _Reasons.BACKEND_MISSING_INTERFACE 25 ) 26 27 self._algorithm = algorithm 28 29 if not (salt is None or isinstance(salt, bytes)): 30 raise TypeError("salt must be bytes.") 31 32 if salt is None: 33 salt = b"\x00" * self._algorithm.digest_size 34 35 self._salt = salt 36 37 self._backend = backend 38 39 self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend) 40 41 def _extract(self, key_material): 42 h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend) 43 h.update(key_material) 44 return h.finalize() 45 46 def derive(self, key_material): 47 if not isinstance(key_material, bytes): 48 raise TypeError("key_material must be bytes.") 49 50 return self._hkdf_expand.derive(self._extract(key_material)) 51 52 def verify(self, key_material, expected_key): 53 if not constant_time.bytes_eq(self.derive(key_material), expected_key): 54 raise InvalidKey 55 56 57 @utils.register_interface(KeyDerivationFunction) 58 class HKDFExpand(object): 59 def __init__(self, algorithm, length, info, backend): 60 if not isinstance(backend, HMACBackend): 61 raise UnsupportedAlgorithm( 62 "Backend object does not implement HMACBackend.", 63 _Reasons.BACKEND_MISSING_INTERFACE 64 ) 65 66 self._algorithm = algorithm 67 68 self._backend = backend 69 70 max_length = 255 * (algorithm.digest_size // 8) 71 72 if length > max_length: 73 raise ValueError( 74 "Can not derive keys larger than {0} octets.".format( 75 max_length 76 )) 77 78 self._length = length 79 80 if 
not (info is None or isinstance(info, bytes)): 81 raise TypeError("info must be bytes.") 82 83 if info is None: 84 info = b"" 85 86 self._info = info 87 88 self._used = False 89 90 def _expand(self, key_material): 91 output = [b""] 92 counter = 1 93 94 while self._algorithm.digest_size * (len(output) - 1) < self._length: 95 h = hmac.HMAC(key_material, self._algorithm, backend=self._backend) 96 h.update(output[-1]) 97 h.update(self._info) 98 h.update(six.int2byte(counter)) 99 output.append(h.finalize()) 100 counter += 1 101 102 return b"".join(output)[:self._length] 103 104 def derive(self, key_material): 105 if not isinstance(key_material, bytes): 106 raise TypeError("key_material must be bytes.") 107 108 if self._used: 109 raise AlreadyFinalized 110 111 self._used = True 112 return self._expand(key_material) 113 114 def verify(self, key_material, expected_key): 115 if not constant_time.bytes_eq(self.derive(key_material), expected_key): 116 raise InvalidKey 117 [end of src/cryptography/hazmat/primitives/kdf/hkdf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py --- a/src/cryptography/hazmat/primitives/kdf/hkdf.py +++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py @@ -67,7 +67,7 @@ self._backend = backend - max_length = 255 * (algorithm.digest_size // 8) + max_length = 255 * algorithm.digest_size if length > max_length: raise ValueError(
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py\n+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n@@ -67,7 +67,7 @@\n \n self._backend = backend\n \n- max_length = 255 * (algorithm.digest_size // 8)\n+ max_length = 255 * algorithm.digest_size\n \n if length > max_length:\n raise ValueError(\n", "issue": "Bug in HKDF?\nI think the computation of [`max_length`](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L70) in `src/cryptography/hazmat/primitives/kdf/hkdf.py` is wrong.\r\n\r\n[RFC5869](https://tools.ietf.org/html/rfc5869) states on page 3 that the input `L` of the HKDF-Expand function describes the \"length of output keying material in octets (<= 255*HashLen)\".\r\nAn octet consists of 8 bit. \r\n\r\nCurrently, `max_length` is computed as:\r\n\r\n```\r\nmax_length = 255 * (algorithm.digest_size // 8)\r\n```\r\n\r\nThe problem is, that `algorithm.digest_size` returns the size of the digest in bytes. (There are 8 bits per byte). Therefore, the division by 8 is wrong, and thus, `max_length` is unnecessarily small.\r\n\r\n(same applies for the computation of `salt` as well ([line 33](https://github.com/pyca/cryptography/blob/66460d8f62b3f27a009bb61be6ce7675c8451b6e/src/cryptography/hazmat/primitives/kdf/hkdf.py#L33)), in the case where `salt is None`)\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * self._algorithm.digest_size\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n 
)\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}]}
1,944
132
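For a concrete sense of the off-by-a-factor-of-8 reasoning above, here is a minimal sketch. It uses hashlib's SHA-256 purely as a stand-in for the `cryptography` hash object (both report `digest_size` in bytes), and contrasts the buggy limit with the RFC 5869 one:

```python
import hashlib

# digest_size is already expressed in bytes (octets), not bits:
# SHA-256 produces a 32-byte digest.
digest_size = hashlib.sha256().digest_size   # 32

buggy_max = 255 * (digest_size // 8)   # 1020 octets -- 8x too small
fixed_max = 255 * digest_size          # 8160 octets, the RFC 5869 limit of 255 * HashLen

print(digest_size, buggy_max, fixed_max)
```

Any requested length between 1021 and 8160 octets would have been rejected by the old check even though RFC 5869 permits it.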
gh_patches_debug_18475
rasdani/github-patches
git_diff
getnikola__nikola-1957
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> handle include tag in mako templates Currently templates used via include tags are not considered dependencies. It's not hard. handle include tag in mako templates Currently templates used via include tags are not considered dependencies. It's not hard. </issue> <code> [start of nikola/plugins/template/mako.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2015 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Mako template handler.""" 28 29 from __future__ import unicode_literals, print_function, absolute_import 30 import os 31 import shutil 32 import sys 33 import tempfile 34 35 from mako import util, lexer 36 from mako.lookup import TemplateLookup 37 from mako.template import Template 38 from markupsafe import Markup # It's ok, Mako requires it 39 40 from nikola.plugin_categories import TemplateSystem 41 from nikola.utils import makedirs, get_logger, STDERR_HANDLER 42 43 LOGGER = get_logger('mako', STDERR_HANDLER) 44 45 46 class MakoTemplates(TemplateSystem): 47 48 """Support for Mako templates.""" 49 50 name = "mako" 51 52 lookup = None 53 cache = {} 54 filters = {} 55 directories = [] 56 cache_dir = None 57 58 def get_deps(self, filename): 59 """Get dependencies for a template (internal function).""" 60 text = util.read_file(filename) 61 lex = lexer.Lexer(text=text, filename=filename) 62 lex.parse() 63 64 deps = [] 65 for n in lex.template.nodes: 66 keyword = getattr(n, 'keyword', None) 67 if keyword in ["inherit", "namespace"]: 68 deps.append(n.attributes['file']) 69 # TODO: include tags are not handled 70 return deps 71 72 def set_directories(self, directories, cache_folder): 73 """Create a new template lookup with set directories.""" 74 cache_dir = os.path.join(cache_folder, '.mako.tmp') 75 # Workaround for a Mako bug, Issue #825 76 if sys.version_info[0] == 2: 77 try: 78 os.path.abspath(cache_dir).decode('ascii') 79 except UnicodeEncodeError: 80 cache_dir = tempfile.mkdtemp() 81 LOGGER.warning('Because of a Mako bug, setting cache_dir to {0}'.format(cache_dir)) 82 if os.path.exists(cache_dir): 83 shutil.rmtree(cache_dir) 84 self.directories = directories 85 self.cache_dir = cache_dir 86 self.create_lookup() 87 88 def inject_directory(self, directory): 89 """Add a directory to the lookup and recreate it if it's not there yet.""" 90 if directory not in self.directories: 91 
self.directories.append(directory) 92 self.create_lookup() 93 94 def create_lookup(self): 95 """Create a template lookup.""" 96 self.lookup = TemplateLookup( 97 directories=self.directories, 98 module_directory=self.cache_dir, 99 output_encoding='utf-8') 100 101 def set_site(self, site): 102 """Set the Nikola site.""" 103 self.site = site 104 self.filters.update(self.site.config['TEMPLATE_FILTERS']) 105 106 def render_template(self, template_name, output_name, context): 107 """Render the template into output_name using context.""" 108 context['striphtml'] = striphtml 109 template = self.lookup.get_template(template_name) 110 data = template.render_unicode(**context) 111 if output_name is not None: 112 makedirs(os.path.dirname(output_name)) 113 with open(output_name, 'w+') as output: 114 output.write(data) 115 return data 116 117 def render_template_to_string(self, template, context): 118 """Render template to a string using context.""" 119 context.update(self.filters) 120 return Template(template).render(**context) 121 122 def template_deps(self, template_name): 123 """Generate list of dependencies for a template.""" 124 # We can cache here because dependencies should 125 # not change between runs 126 if self.cache.get(template_name, None) is None: 127 template = self.lookup.get_template(template_name) 128 dep_filenames = self.get_deps(template.filename) 129 deps = [template.filename] 130 for fname in dep_filenames: 131 deps += self.template_deps(fname) 132 self.cache[template_name] = tuple(deps) 133 return list(self.cache[template_name]) 134 135 136 def striphtml(text): 137 """Strip HTML tags from text.""" 138 return Markup(text).striptags() 139 [end of nikola/plugins/template/mako.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/template/mako.py b/nikola/plugins/template/mako.py --- a/nikola/plugins/template/mako.py +++ b/nikola/plugins/template/mako.py @@ -32,7 +32,7 @@ import sys import tempfile -from mako import util, lexer +from mako import util, lexer, parsetree from mako.lookup import TemplateLookup from mako.template import Template from markupsafe import Markup # It's ok, Mako requires it @@ -64,9 +64,8 @@ deps = [] for n in lex.template.nodes: keyword = getattr(n, 'keyword', None) - if keyword in ["inherit", "namespace"]: + if keyword in ["inherit", "namespace"] or isinstance(n, parsetree.IncludeTag): deps.append(n.attributes['file']) - # TODO: include tags are not handled return deps def set_directories(self, directories, cache_folder):
{"golden_diff": "diff --git a/nikola/plugins/template/mako.py b/nikola/plugins/template/mako.py\n--- a/nikola/plugins/template/mako.py\n+++ b/nikola/plugins/template/mako.py\n@@ -32,7 +32,7 @@\n import sys\n import tempfile\n \n-from mako import util, lexer\n+from mako import util, lexer, parsetree\n from mako.lookup import TemplateLookup\n from mako.template import Template\n from markupsafe import Markup # It's ok, Mako requires it\n@@ -64,9 +64,8 @@\n deps = []\n for n in lex.template.nodes:\n keyword = getattr(n, 'keyword', None)\n- if keyword in [\"inherit\", \"namespace\"]:\n+ if keyword in [\"inherit\", \"namespace\"] or isinstance(n, parsetree.IncludeTag):\n deps.append(n.attributes['file'])\n- # TODO: include tags are not handled\n return deps\n \n def set_directories(self, directories, cache_folder):\n", "issue": "handle include tag in mako templates\nCurrently templates used via include tags are not considered dependencies. It's not hard.\n\nhandle include tag in mako templates\nCurrently templates used via include tags are not considered dependencies. It's not hard.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Mako template handler.\"\"\"\n\nfrom __future__ import unicode_literals, print_function, absolute_import\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom mako import util, lexer\nfrom mako.lookup import TemplateLookup\nfrom mako.template import Template\nfrom markupsafe import Markup # It's ok, Mako requires it\n\nfrom nikola.plugin_categories import TemplateSystem\nfrom nikola.utils import makedirs, get_logger, STDERR_HANDLER\n\nLOGGER = get_logger('mako', STDERR_HANDLER)\n\n\nclass MakoTemplates(TemplateSystem):\n\n \"\"\"Support for Mako templates.\"\"\"\n\n name = \"mako\"\n\n lookup = None\n cache = {}\n filters = {}\n directories = []\n cache_dir = None\n\n def get_deps(self, filename):\n \"\"\"Get dependencies for a template (internal function).\"\"\"\n text = util.read_file(filename)\n lex = lexer.Lexer(text=text, filename=filename)\n lex.parse()\n\n deps = []\n for n in lex.template.nodes:\n keyword = getattr(n, 'keyword', None)\n if keyword in [\"inherit\", \"namespace\"]:\n deps.append(n.attributes['file'])\n # TODO: include tags are not handled\n return deps\n\n def set_directories(self, directories, cache_folder):\n \"\"\"Create a new template lookup with set directories.\"\"\"\n cache_dir = os.path.join(cache_folder, '.mako.tmp')\n # Workaround for a Mako bug, Issue #825\n if sys.version_info[0] == 2:\n try:\n os.path.abspath(cache_dir).decode('ascii')\n except UnicodeEncodeError:\n cache_dir = tempfile.mkdtemp()\n LOGGER.warning('Because of a Mako bug, setting cache_dir to {0}'.format(cache_dir))\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n self.directories = directories\n self.cache_dir = cache_dir\n self.create_lookup()\n\n def inject_directory(self, directory):\n \"\"\"Add a directory to the lookup and recreate it if it's not there yet.\"\"\"\n if directory not in self.directories:\n self.directories.append(directory)\n self.create_lookup()\n\n def create_lookup(self):\n \"\"\"Create a template lookup.\"\"\"\n self.lookup = TemplateLookup(\n directories=self.directories,\n module_directory=self.cache_dir,\n output_encoding='utf-8')\n\n def set_site(self, site):\n \"\"\"Set the Nikola site.\"\"\"\n self.site = site\n self.filters.update(self.site.config['TEMPLATE_FILTERS'])\n\n def render_template(self, template_name, output_name, context):\n \"\"\"Render the template into output_name using context.\"\"\"\n context['striphtml'] = striphtml\n template = self.lookup.get_template(template_name)\n data = template.render_unicode(**context)\n if output_name is not None:\n makedirs(os.path.dirname(output_name))\n with open(output_name, 'w+') as output:\n output.write(data)\n return data\n\n def render_template_to_string(self, template, context):\n \"\"\"Render template to a string using context.\"\"\"\n context.update(self.filters)\n return Template(template).render(**context)\n\n def template_deps(self, template_name):\n \"\"\"Generate list of dependencies for a template.\"\"\"\n # We can cache here because dependencies should\n # not change between runs\n if self.cache.get(template_name, None) is None:\n template = self.lookup.get_template(template_name)\n dep_filenames = self.get_deps(template.filename)\n deps = [template.filename]\n for fname in dep_filenames:\n deps += 
self.template_deps(fname)\n self.cache[template_name] = tuple(deps)\n return list(self.cache[template_name])\n\n\ndef striphtml(text):\n \"\"\"Strip HTML tags from text.\"\"\"\n return Markup(text).striptags()\n", "path": "nikola/plugins/template/mako.py"}]}
1,963
218
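The golden diff above replaces the keyword-only check with a node-type test so `<%include>` tags count as dependencies too. A standalone sketch of the same idea, assuming Mako is installed and using a throwaway two-line template instead of Nikola's real lookup machinery:

```python
from mako import lexer, parsetree


def template_deps(text, filename="<template>"):
    """Collect files referenced via inherit, namespace, or include tags."""
    lex = lexer.Lexer(text=text, filename=filename)
    lex.parse()
    deps = []
    for node in lex.template.nodes:
        keyword = getattr(node, "keyword", None)
        # Include tags are matched by node type, mirroring the fix above.
        if keyword in ("inherit", "namespace") or isinstance(node, parsetree.IncludeTag):
            deps.append(node.attributes["file"])
    return deps


print(template_deps('<%inherit file="base.tmpl"/>\n<%include file="sidebar.tmpl"/>'))
# expected: ['base.tmpl', 'sidebar.tmpl']
```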
gh_patches_debug_339
rasdani/github-patches
git_diff
pyro-ppl__pyro-3164
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PyTorch 2.0 compatibility: Explicit PyTorch 1.x check causing issues with packages that depend on PyTorch / pyro (e.g. BoTorch) ### Issue Description The explicit check for PyTorch 1.x here (https://github.com/pyro-ppl/pyro/blob/dev/pyro/distributions/torch_patch.py#L10) is causing problems when another package has a dependency on PyTorch + Pyro, since PyTorch is now at 2.0. For example, it is causing BoTorch tests to fail here (https://github.com/pytorch/botorch/pull/1551). Could this check be removed to allow for PyTorch 2.0? ### Environment Mac OS 11.7.1 Python 3.10 PyTorch 2.0 Pyro 1.8.3 ### Code Snippet https://github.com/pytorch/botorch/actions/runs/3659534850/jobs/6185642011 </issue> <code> [start of pyro/distributions/torch_patch.py] 1 # Copyright (c) 2017-2019 Uber Technologies, Inc. 2 # SPDX-License-Identifier: Apache-2.0 3 4 import functools 5 import math 6 import weakref 7 8 import torch 9 10 assert torch.__version__.startswith("1.") 11 12 13 def patch_dependency(target, root_module=torch): 14 parts = target.split(".") 15 assert parts[0] == root_module.__name__ 16 module = root_module 17 for part in parts[1:-1]: 18 module = getattr(module, part) 19 name = parts[-1] 20 old_fn = getattr(module, name, None) 21 old_fn = getattr(old_fn, "_pyro_unpatched", old_fn) # ensure patching is idempotent 22 23 def decorator(new_fn): 24 try: 25 functools.update_wrapper(new_fn, old_fn) 26 except Exception: 27 for attr in functools.WRAPPER_ASSIGNMENTS: 28 if hasattr(old_fn, attr): 29 setattr(new_fn, attr, getattr(old_fn, attr)) 30 new_fn._pyro_unpatched = old_fn 31 setattr(module, name, new_fn) 32 return new_fn 33 34 return decorator 35 36 37 # TODO: Move upstream to allow for pickle serialization of transforms 38 @patch_dependency("torch.distributions.transforms.Transform.__getstate__") 39 def _Transform__getstate__(self): 40 attrs = {} 41 for k, v in self.__dict__.items(): 42 if isinstance(v, weakref.ref): 43 attrs[k] = None 44 else: 45 attrs[k] = v 46 return attrs 47 48 49 # TODO move upstream 50 @patch_dependency("torch.distributions.transforms.Transform.clear_cache") 51 def _Transform_clear_cache(self): 52 if self._cache_size == 1: 53 self._cached_x_y = None, None 54 55 56 # TODO move upstream 57 @patch_dependency("torch.distributions.TransformedDistribution.clear_cache") 58 def _TransformedDistribution_clear_cache(self): 59 for t in self.transforms: 60 t.clear_cache() 61 62 63 # TODO fix https://github.com/pytorch/pytorch/issues/48054 upstream 64 @patch_dependency("torch.distributions.HalfCauchy.log_prob") 65 def _HalfCauchy_logprob(self, value): 66 if self._validate_args: 67 self._validate_sample(value) 68 value = torch.as_tensor( 69 value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device 70 ) 71 log_prob = self.base_dist.log_prob(value) + math.log(2) 72 log_prob.masked_fill_(value.expand(log_prob.shape) < 0, -float("inf")) 73 return log_prob 74 75 76 # TODO fix batch_shape have an extra singleton dimension upstream 77 @patch_dependency("torch.distributions.constraints._PositiveDefinite.check") 78 def _PositiveDefinite_check(self, value): 79 matrix_shape = value.shape[-2:] 80 batch_shape = value.shape[:-2] 81 flattened_value = value.reshape((-1,) + matrix_shape) 82 return torch.stack( 83 [torch.linalg.eigvalsh(v)[:1] > 0.0 for v in flattened_value] 84 ).view(batch_shape) 85 86 87 @patch_dependency("torch.distributions.constraints._CorrCholesky.check") 88 def 
_CorrCholesky_check(self, value): 89 row_norm = torch.linalg.norm(value.detach(), dim=-1) 90 unit_row_norm = (row_norm - 1.0).abs().le(1e-4).all(dim=-1) 91 return torch.distributions.constraints.lower_cholesky.check(value) & unit_row_norm 92 93 94 # This adds a __call__ method to satisfy sphinx. 95 @patch_dependency("torch.distributions.utils.lazy_property.__call__") 96 def _lazy_property__call__(self): 97 raise NotImplementedError 98 99 100 __all__ = [] 101 [end of pyro/distributions/torch_patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyro/distributions/torch_patch.py b/pyro/distributions/torch_patch.py --- a/pyro/distributions/torch_patch.py +++ b/pyro/distributions/torch_patch.py @@ -7,8 +7,6 @@ import torch -assert torch.__version__.startswith("1.") - def patch_dependency(target, root_module=torch): parts = target.split(".")
{"golden_diff": "diff --git a/pyro/distributions/torch_patch.py b/pyro/distributions/torch_patch.py\n--- a/pyro/distributions/torch_patch.py\n+++ b/pyro/distributions/torch_patch.py\n@@ -7,8 +7,6 @@\n \n import torch\n \n-assert torch.__version__.startswith(\"1.\")\n-\n \n def patch_dependency(target, root_module=torch):\n parts = target.split(\".\")\n", "issue": "PyTorch 2.0 compatibility: Explicit PyTorch 1.x check causing issues with packages that depend on PyTorch / pyro (e.g. BoTorch)\n### Issue Description\r\nThe explicit check for PyTorch 1.x here (https://github.com/pyro-ppl/pyro/blob/dev/pyro/distributions/torch_patch.py#L10) is causing problems when another package has a dependency on PyTorch + Pyro, since PyTorch is now at 2.0. For example, it is causing BoTorch tests to fail here (https://github.com/pytorch/botorch/pull/1551).\r\n\r\nCould this check be removed to allow for PyTorch 2.0?\r\n\r\n### Environment\r\nMac OS 11.7.1\r\nPython 3.10\r\nPyTorch 2.0\r\nPyro 1.8.3\r\n\r\n### Code Snippet\r\nhttps://github.com/pytorch/botorch/actions/runs/3659534850/jobs/6185642011\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport math\nimport weakref\n\nimport torch\n\nassert torch.__version__.startswith(\"1.\")\n\n\ndef patch_dependency(target, root_module=torch):\n parts = target.split(\".\")\n assert parts[0] == root_module.__name__\n module = root_module\n for part in parts[1:-1]:\n module = getattr(module, part)\n name = parts[-1]\n old_fn = getattr(module, name, None)\n old_fn = getattr(old_fn, \"_pyro_unpatched\", old_fn) # ensure patching is idempotent\n\n def decorator(new_fn):\n try:\n functools.update_wrapper(new_fn, old_fn)\n except Exception:\n for attr in functools.WRAPPER_ASSIGNMENTS:\n if hasattr(old_fn, attr):\n setattr(new_fn, attr, getattr(old_fn, attr))\n new_fn._pyro_unpatched = old_fn\n setattr(module, name, new_fn)\n return new_fn\n\n return decorator\n\n\n# TODO: Move upstream to allow for pickle serialization of transforms\n@patch_dependency(\"torch.distributions.transforms.Transform.__getstate__\")\ndef _Transform__getstate__(self):\n attrs = {}\n for k, v in self.__dict__.items():\n if isinstance(v, weakref.ref):\n attrs[k] = None\n else:\n attrs[k] = v\n return attrs\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.transforms.Transform.clear_cache\")\ndef _Transform_clear_cache(self):\n if self._cache_size == 1:\n self._cached_x_y = None, None\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.TransformedDistribution.clear_cache\")\ndef _TransformedDistribution_clear_cache(self):\n for t in self.transforms:\n t.clear_cache()\n\n\n# TODO fix https://github.com/pytorch/pytorch/issues/48054 upstream\n@patch_dependency(\"torch.distributions.HalfCauchy.log_prob\")\ndef _HalfCauchy_logprob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n value = torch.as_tensor(\n value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device\n )\n log_prob = self.base_dist.log_prob(value) + math.log(2)\n log_prob.masked_fill_(value.expand(log_prob.shape) < 0, -float(\"inf\"))\n return log_prob\n\n\n# TODO fix batch_shape have an extra singleton dimension upstream\n@patch_dependency(\"torch.distributions.constraints._PositiveDefinite.check\")\ndef _PositiveDefinite_check(self, value):\n matrix_shape = value.shape[-2:]\n batch_shape = value.shape[:-2]\n flattened_value = value.reshape((-1,) + matrix_shape)\n return 
torch.stack(\n [torch.linalg.eigvalsh(v)[:1] > 0.0 for v in flattened_value]\n ).view(batch_shape)\n\n\n@patch_dependency(\"torch.distributions.constraints._CorrCholesky.check\")\ndef _CorrCholesky_check(self, value):\n row_norm = torch.linalg.norm(value.detach(), dim=-1)\n unit_row_norm = (row_norm - 1.0).abs().le(1e-4).all(dim=-1)\n return torch.distributions.constraints.lower_cholesky.check(value) & unit_row_norm\n\n\n# This adds a __call__ method to satisfy sphinx.\n@patch_dependency(\"torch.distributions.utils.lazy_property.__call__\")\ndef _lazy_property__call__(self):\n raise NotImplementedError\n\n\n__all__ = []\n", "path": "pyro/distributions/torch_patch.py"}]}
1,764
87
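The upstream fix is simply to delete the import-time assertion; for completeness, a tiny repro of why any 2.x install trips it (no PyTorch needed, since the guard is just a string-prefix test on the version):

```python
for version in ("1.13.1", "2.0.0"):
    # torch.__version__.startswith("1.") is what the old assert evaluated
    print(version, "passes" if version.startswith("1.") else "raises AssertionError")
```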
gh_patches_debug_20698
rasdani/github-patches
git_diff
freqtrade__freqtrade-5530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> API Server under 2021.8 <!-- Have you searched for similar issues before posting it? Did you have a VERY good look at the [documentation](https://www.freqtrade.io/en/latest/) and are sure that the question is not explained there Please do not use the question template to report bugs or to request new features. --> ## Describe your environment * Operating system: Windows Server 2019 * Python Version: Miniconda 3 * CCXT version: 1.55.56_____ (`pip freeze | grep ccxt`) * Freqtrade Version: 2021.8 (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker) ## Your question This might be a bug, I post it as question, since I am nor sure for 100%. (OS and Miniconda configuration works fine for Freqtrade since 2020.12) Trading works fine under Telegram with current version. With current version and activated API Server, the system remains idle and does not begin to trade. Play button in GUI is pushed. Even no trades visible in DB, opened in a SQlite explorer. API Server web GUI works excellent. (Trading with API Server works fine under 2021.7) *Ask the question you have not been able to find an answer in our [Documentation](https://www.freqtrade.io/en/latest/)* </issue> <code> [start of freqtrade/rpc/api_server/uvicorn_threaded.py] 1 import contextlib 2 import threading 3 import time 4 5 import uvicorn 6 7 8 class UvicornServer(uvicorn.Server): 9 """ 10 Multithreaded server - as found in https://github.com/encode/uvicorn/issues/742 11 12 Removed install_signal_handlers() override based on changes from this commit: 13 https://github.com/encode/uvicorn/commit/ce2ef45a9109df8eae038c0ec323eb63d644cbc6 14 15 Cannot rely on asyncio.get_event_loop() to create new event loop because of this check: 16 https://github.com/python/cpython/blob/4d7f11e05731f67fd2c07ec2972c6cb9861d52be/Lib/asyncio/events.py#L638 17 18 Fix by overriding run() and forcing creation of new event loop if uvloop is available 19 """ 20 21 def run(self, sockets=None): 22 import asyncio 23 24 """ 25 Parent implementation calls self.config.setup_event_loop(), 26 but we need to create uvloop event loop manually 27 """ 28 try: 29 import uvloop # noqa 30 except ImportError: # pragma: no cover 31 from uvicorn.loops.asyncio import asyncio_setup 32 asyncio_setup() 33 else: 34 asyncio.set_event_loop(uvloop.new_event_loop()) 35 try: 36 loop = asyncio.get_event_loop() 37 except RuntimeError: 38 # When running in a thread, we'll not have an eventloop yet. 39 loop = asyncio.new_event_loop() 40 loop.run_until_complete(self.serve(sockets=sockets)) 41 42 @contextlib.contextmanager 43 def run_in_thread(self): 44 self.thread = threading.Thread(target=self.run) 45 self.thread.start() 46 while not self.started: 47 time.sleep(1e-3) 48 49 def cleanup(self): 50 self.should_exit = True 51 self.thread.join() 52 [end of freqtrade/rpc/api_server/uvicorn_threaded.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/freqtrade/rpc/api_server/uvicorn_threaded.py b/freqtrade/rpc/api_server/uvicorn_threaded.py --- a/freqtrade/rpc/api_server/uvicorn_threaded.py +++ b/freqtrade/rpc/api_server/uvicorn_threaded.py @@ -5,6 +5,20 @@ import uvicorn +def asyncio_setup() -> None: # pragma: no cover + # Set eventloop for win32 setups + # Reverts a change done in uvicorn 0.15.0 - which now sets the eventloop + # via policy. + import sys + + if sys.version_info >= (3, 8) and sys.platform == "win32": + import asyncio + import selectors + selector = selectors.SelectSelector() + loop = asyncio.SelectorEventLoop(selector) + asyncio.set_event_loop(loop) + + class UvicornServer(uvicorn.Server): """ Multithreaded server - as found in https://github.com/encode/uvicorn/issues/742 @@ -28,7 +42,7 @@ try: import uvloop # noqa except ImportError: # pragma: no cover - from uvicorn.loops.asyncio import asyncio_setup + asyncio_setup() else: asyncio.set_event_loop(uvloop.new_event_loop())
{"golden_diff": "diff --git a/freqtrade/rpc/api_server/uvicorn_threaded.py b/freqtrade/rpc/api_server/uvicorn_threaded.py\n--- a/freqtrade/rpc/api_server/uvicorn_threaded.py\n+++ b/freqtrade/rpc/api_server/uvicorn_threaded.py\n@@ -5,6 +5,20 @@\n import uvicorn\n \n \n+def asyncio_setup() -> None: # pragma: no cover\n+ # Set eventloop for win32 setups\n+ # Reverts a change done in uvicorn 0.15.0 - which now sets the eventloop\n+ # via policy.\n+ import sys\n+\n+ if sys.version_info >= (3, 8) and sys.platform == \"win32\":\n+ import asyncio\n+ import selectors\n+ selector = selectors.SelectSelector()\n+ loop = asyncio.SelectorEventLoop(selector)\n+ asyncio.set_event_loop(loop)\n+\n+\n class UvicornServer(uvicorn.Server):\n \"\"\"\n Multithreaded server - as found in https://github.com/encode/uvicorn/issues/742\n@@ -28,7 +42,7 @@\n try:\n import uvloop # noqa\n except ImportError: # pragma: no cover\n- from uvicorn.loops.asyncio import asyncio_setup\n+\n asyncio_setup()\n else:\n asyncio.set_event_loop(uvloop.new_event_loop())\n", "issue": "API Server under 2021.8\n<!-- \r\nHave you searched for similar issues before posting it?\r\nDid you have a VERY good look at the [documentation](https://www.freqtrade.io/en/latest/) and are sure that the question is not explained there\r\n\r\nPlease do not use the question template to report bugs or to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: Windows Server 2019\r\n * Python Version: Miniconda 3\r\n * CCXT version: 1.55.56_____ (`pip freeze | grep ccxt`)\r\n * Freqtrade Version: 2021.8 (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)\r\n \r\n## Your question\r\nThis might be a bug, I post it as question, since I am nor sure for 100%.\r\n\r\n(OS and Miniconda configuration works fine for Freqtrade since 2020.12)\r\nTrading works fine under Telegram with current version.\r\nWith current version and activated API Server, the system remains idle and does not begin to trade. Play button in GUI is pushed. 
Even no trades visible in DB, opened in a SQlite explorer.\r\nAPI Server web GUI works excellent.\r\n(Trading with API Server works fine under 2021.7)\r\n\r\n*Ask the question you have not been able to find an answer in our [Documentation](https://www.freqtrade.io/en/latest/)*\r\n\n", "before_files": [{"content": "import contextlib\nimport threading\nimport time\n\nimport uvicorn\n\n\nclass UvicornServer(uvicorn.Server):\n \"\"\"\n Multithreaded server - as found in https://github.com/encode/uvicorn/issues/742\n\n Removed install_signal_handlers() override based on changes from this commit:\n https://github.com/encode/uvicorn/commit/ce2ef45a9109df8eae038c0ec323eb63d644cbc6\n\n Cannot rely on asyncio.get_event_loop() to create new event loop because of this check:\n https://github.com/python/cpython/blob/4d7f11e05731f67fd2c07ec2972c6cb9861d52be/Lib/asyncio/events.py#L638\n\n Fix by overriding run() and forcing creation of new event loop if uvloop is available\n \"\"\"\n\n def run(self, sockets=None):\n import asyncio\n\n \"\"\"\n Parent implementation calls self.config.setup_event_loop(),\n but we need to create uvloop event loop manually\n \"\"\"\n try:\n import uvloop # noqa\n except ImportError: # pragma: no cover\n from uvicorn.loops.asyncio import asyncio_setup\n asyncio_setup()\n else:\n asyncio.set_event_loop(uvloop.new_event_loop())\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # When running in a thread, we'll not have an eventloop yet.\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.serve(sockets=sockets))\n\n @contextlib.contextmanager\n def run_in_thread(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n while not self.started:\n time.sleep(1e-3)\n\n def cleanup(self):\n self.should_exit = True\n self.thread.join()\n", "path": "freqtrade/rpc/api_server/uvicorn_threaded.py"}]}
1,390
311
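The fix boils down to opting back into a selector-based event loop on Windows. A self-contained sketch of that opt-in (a no-op on other platforms); the comments carry the assumption, taken from the patch itself, that uvicorn 0.15+ configures the loop via policy, which on Windows with Python 3.8+ hands out a Proactor loop:

```python
import asyncio
import selectors
import sys

# What the default policy hands out -- ProactorEventLoop on Windows 3.8+.
loop = asyncio.new_event_loop()
print(type(loop).__name__)
loop.close()


def asyncio_setup() -> None:
    # Pin a selector-based loop on win32, mirroring the patched freqtrade code.
    if sys.version_info >= (3, 8) and sys.platform == "win32":
        asyncio.set_event_loop(asyncio.SelectorEventLoop(selectors.SelectSelector()))


asyncio_setup()
```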
gh_patches_debug_25617
rasdani/github-patches
git_diff
saleor__saleor-3169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User type should be able to return `created` and `lastLogin` fields There is no way to obtain information when a user was registered and when she/he logged last time. </issue> <code> [start of saleor/graphql/account/resolvers.py] 1 from django.db.models import Q 2 from i18naddress import get_validation_rules 3 4 from ...account import models 5 from ...core.utils import get_client_ip, get_country_by_ip 6 from ..utils import filter_by_query_param 7 from .types import AddressValidationData, ChoiceValue 8 9 USER_SEARCH_FIELDS = ( 10 'email', 'default_shipping_address__first_name', 11 'default_shipping_address__last_name', 'default_shipping_address__city', 12 'default_shipping_address__country') 13 14 15 def resolve_customers(info, query): 16 qs = models.User.objects.filter( 17 Q(is_staff=False) | (Q(is_staff=True) & Q(orders__isnull=False)) 18 ).prefetch_related('addresses') 19 return filter_by_query_param( 20 queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS) 21 22 23 def resolve_staff_users(info, query): 24 qs = models.User.objects.filter(is_staff=True) 25 return filter_by_query_param( 26 queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS) 27 28 29 def resolve_address_validator(info, input): 30 country_code = input['country_code'] 31 if not country_code: 32 client_ip = get_client_ip(info.context) 33 country = get_country_by_ip(client_ip) 34 if country: 35 country_code = country.code 36 else: 37 return None 38 params = { 39 'country_code': country_code, 40 'country_area': input['country_area'], 41 'city_area': input['city_area']} 42 rules = get_validation_rules(params) 43 44 return AddressValidationData( 45 country_code=rules.country_code, 46 country_name=rules.country_name, 47 address_format=rules.address_format, 48 address_latin_format=rules.address_latin_format, 49 allowed_fields=rules.allowed_fields, 50 required_fields=rules.required_fields, 51 upper_fields=rules.upper_fields, 52 country_area_type=rules.country_area_type, 53 country_area_choices=[ 54 ChoiceValue(area[0], area[1]) 55 for area in rules.country_area_choices], 56 city_type=rules.city_type, 57 city_area_choices=[ 58 ChoiceValue(area[0], area[1]) for area in rules.city_area_choices], 59 postal_code_type=rules.postal_code_type, 60 postal_code_matchers=[ 61 compiled.pattern for compiled in rules.postal_code_matchers], 62 postal_code_examples=rules.postal_code_examples, 63 postal_code_prefix=rules.postal_code_prefix 64 ) 65 [end of saleor/graphql/account/resolvers.py] [start of saleor/graphql/account/types.py] 1 import graphene 2 from django.contrib.auth import get_user_model 3 from graphene import relay 4 5 from ...account import models 6 from ...core.permissions import get_permissions 7 from ..core.types.common import ( 8 CountableDjangoObjectType, CountryDisplay, PermissionDisplay) 9 from ..utils import format_permissions_for_display 10 11 12 class AddressInput(graphene.InputObjectType): 13 first_name = graphene.String(description='Given name.') 14 last_name = graphene.String(description='Family name.') 15 company_name = graphene.String(description='Company or organization.') 16 street_address_1 = graphene.String(description='Address.') 17 street_address_2 = graphene.String(description='Address.') 18 city = graphene.String(description='City.') 19 city_area = graphene.String(description='District.') 20 postal_code = graphene.String(description='Postal code.') 21 country = 
graphene.String(description='Country.') 22 country_area = graphene.String(description='State or province.') 23 phone = graphene.String(description='Phone number.') 24 25 26 class Address(CountableDjangoObjectType): 27 country = graphene.Field( 28 CountryDisplay, required=True, description='Default shop\'s country') 29 30 class Meta: 31 exclude_fields = ['user_set', 'user_addresses'] 32 description = 'Represents user address data.' 33 interfaces = [relay.Node] 34 model = models.Address 35 36 def resolve_country(self, info): 37 return CountryDisplay( 38 code=self.country.code, country=self.country.name) 39 40 41 class User(CountableDjangoObjectType): 42 permissions = graphene.List(PermissionDisplay) 43 44 class Meta: 45 exclude_fields = [ 46 'date_joined', 'password', 'is_superuser', 47 'OrderEvent_set', 'last_login'] 48 description = 'Represents user data.' 49 interfaces = [relay.Node] 50 model = get_user_model() 51 filter_fields = ['is_staff'] 52 53 def resolve_permissions(self, info, **kwargs): 54 if self.is_superuser: 55 permissions = get_permissions() 56 else: 57 permissions = self.user_permissions.prefetch_related( 58 'content_type').order_by('codename') 59 return format_permissions_for_display(permissions) 60 61 62 class AddressValidationInput(graphene.InputObjectType): 63 country_code = graphene.String() 64 country_area = graphene.String() 65 city_area = graphene.String() 66 67 68 class ChoiceValue(graphene.ObjectType): 69 raw = graphene.String() 70 verbose = graphene.String() 71 72 73 class AddressValidationData(graphene.ObjectType): 74 country_code = graphene.String() 75 country_name = graphene.String() 76 address_format = graphene.String() 77 address_latin_format = graphene.String() 78 allowed_fields = graphene.List(graphene.String) 79 required_fields = graphene.List(graphene.String) 80 upper_fields = graphene.List(graphene.String) 81 country_area_type = graphene.String() 82 country_area_choices = graphene.List(ChoiceValue) 83 city_type = graphene.String() 84 city_area_choices = graphene.List(ChoiceValue) 85 postal_code_type = graphene.String() 86 postal_code_matchers = graphene.List(graphene.String) 87 postal_code_examples = graphene.List(graphene.String) 88 postal_code_prefix = graphene.String() 89 [end of saleor/graphql/account/types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/account/resolvers.py b/saleor/graphql/account/resolvers.py --- a/saleor/graphql/account/resolvers.py +++ b/saleor/graphql/account/resolvers.py @@ -16,14 +16,16 @@ qs = models.User.objects.filter( Q(is_staff=False) | (Q(is_staff=True) & Q(orders__isnull=False)) ).prefetch_related('addresses') - return filter_by_query_param( + qs = filter_by_query_param( queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS) + return qs.distinct() def resolve_staff_users(info, query): qs = models.User.objects.filter(is_staff=True) - return filter_by_query_param( + qs = filter_by_query_param( queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS) + return qs.distinct() def resolve_address_validator(info, input): diff --git a/saleor/graphql/account/types.py b/saleor/graphql/account/types.py --- a/saleor/graphql/account/types.py +++ b/saleor/graphql/account/types.py @@ -42,9 +42,7 @@ permissions = graphene.List(PermissionDisplay) class Meta: - exclude_fields = [ - 'date_joined', 'password', 'is_superuser', - 'OrderEvent_set', 'last_login'] + exclude_fields = ['password', 'is_superuser', 'OrderEvent_set'] description = 'Represents user data.' interfaces = [relay.Node] model = get_user_model()
{"golden_diff": "diff --git a/saleor/graphql/account/resolvers.py b/saleor/graphql/account/resolvers.py\n--- a/saleor/graphql/account/resolvers.py\n+++ b/saleor/graphql/account/resolvers.py\n@@ -16,14 +16,16 @@\n qs = models.User.objects.filter(\n Q(is_staff=False) | (Q(is_staff=True) & Q(orders__isnull=False))\n ).prefetch_related('addresses')\n- return filter_by_query_param(\n+ qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS)\n+ return qs.distinct()\n \n \n def resolve_staff_users(info, query):\n qs = models.User.objects.filter(is_staff=True)\n- return filter_by_query_param(\n+ qs = filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS)\n+ return qs.distinct()\n \n \n def resolve_address_validator(info, input):\ndiff --git a/saleor/graphql/account/types.py b/saleor/graphql/account/types.py\n--- a/saleor/graphql/account/types.py\n+++ b/saleor/graphql/account/types.py\n@@ -42,9 +42,7 @@\n permissions = graphene.List(PermissionDisplay)\n \n class Meta:\n- exclude_fields = [\n- 'date_joined', 'password', 'is_superuser',\n- 'OrderEvent_set', 'last_login']\n+ exclude_fields = ['password', 'is_superuser', 'OrderEvent_set']\n description = 'Represents user data.'\n interfaces = [relay.Node]\n model = get_user_model()\n", "issue": "User type should be able to return `created` and `lastLogin` fields\nThere is no way to obtain information when a user was registered and when she/he logged last time.\n", "before_files": [{"content": "from django.db.models import Q\nfrom i18naddress import get_validation_rules\n\nfrom ...account import models\nfrom ...core.utils import get_client_ip, get_country_by_ip\nfrom ..utils import filter_by_query_param\nfrom .types import AddressValidationData, ChoiceValue\n\nUSER_SEARCH_FIELDS = (\n 'email', 'default_shipping_address__first_name',\n 'default_shipping_address__last_name', 'default_shipping_address__city',\n 'default_shipping_address__country')\n\n\ndef resolve_customers(info, query):\n qs = models.User.objects.filter(\n Q(is_staff=False) | (Q(is_staff=True) & Q(orders__isnull=False))\n ).prefetch_related('addresses')\n return filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS)\n\n\ndef resolve_staff_users(info, query):\n qs = models.User.objects.filter(is_staff=True)\n return filter_by_query_param(\n queryset=qs, query=query, search_fields=USER_SEARCH_FIELDS)\n\n\ndef resolve_address_validator(info, input):\n country_code = input['country_code']\n if not country_code:\n client_ip = get_client_ip(info.context)\n country = get_country_by_ip(client_ip)\n if country:\n country_code = country.code\n else:\n return None\n params = {\n 'country_code': country_code,\n 'country_area': input['country_area'],\n 'city_area': input['city_area']}\n rules = get_validation_rules(params)\n\n return AddressValidationData(\n country_code=rules.country_code,\n country_name=rules.country_name,\n address_format=rules.address_format,\n address_latin_format=rules.address_latin_format,\n allowed_fields=rules.allowed_fields,\n required_fields=rules.required_fields,\n upper_fields=rules.upper_fields,\n country_area_type=rules.country_area_type,\n country_area_choices=[\n ChoiceValue(area[0], area[1])\n for area in rules.country_area_choices],\n city_type=rules.city_type,\n city_area_choices=[\n ChoiceValue(area[0], area[1]) for area in rules.city_area_choices],\n postal_code_type=rules.postal_code_type,\n postal_code_matchers=[\n compiled.pattern for compiled in 
rules.postal_code_matchers],\n postal_code_examples=rules.postal_code_examples,\n postal_code_prefix=rules.postal_code_prefix\n )\n", "path": "saleor/graphql/account/resolvers.py"}, {"content": "import graphene\nfrom django.contrib.auth import get_user_model\nfrom graphene import relay\n\nfrom ...account import models\nfrom ...core.permissions import get_permissions\nfrom ..core.types.common import (\n CountableDjangoObjectType, CountryDisplay, PermissionDisplay)\nfrom ..utils import format_permissions_for_display\n\n\nclass AddressInput(graphene.InputObjectType):\n first_name = graphene.String(description='Given name.')\n last_name = graphene.String(description='Family name.')\n company_name = graphene.String(description='Company or organization.')\n street_address_1 = graphene.String(description='Address.')\n street_address_2 = graphene.String(description='Address.')\n city = graphene.String(description='City.')\n city_area = graphene.String(description='District.')\n postal_code = graphene.String(description='Postal code.')\n country = graphene.String(description='Country.')\n country_area = graphene.String(description='State or province.')\n phone = graphene.String(description='Phone number.')\n\n\nclass Address(CountableDjangoObjectType):\n country = graphene.Field(\n CountryDisplay, required=True, description='Default shop\\'s country')\n\n class Meta:\n exclude_fields = ['user_set', 'user_addresses']\n description = 'Represents user address data.'\n interfaces = [relay.Node]\n model = models.Address\n\n def resolve_country(self, info):\n return CountryDisplay(\n code=self.country.code, country=self.country.name)\n\n\nclass User(CountableDjangoObjectType):\n permissions = graphene.List(PermissionDisplay)\n\n class Meta:\n exclude_fields = [\n 'date_joined', 'password', 'is_superuser',\n 'OrderEvent_set', 'last_login']\n description = 'Represents user data.'\n interfaces = [relay.Node]\n model = get_user_model()\n filter_fields = ['is_staff']\n\n def resolve_permissions(self, info, **kwargs):\n if self.is_superuser:\n permissions = get_permissions()\n else:\n permissions = self.user_permissions.prefetch_related(\n 'content_type').order_by('codename')\n return format_permissions_for_display(permissions)\n\n\nclass AddressValidationInput(graphene.InputObjectType):\n country_code = graphene.String()\n country_area = graphene.String()\n city_area = graphene.String()\n\n\nclass ChoiceValue(graphene.ObjectType):\n raw = graphene.String()\n verbose = graphene.String()\n\n\nclass AddressValidationData(graphene.ObjectType):\n country_code = graphene.String()\n country_name = graphene.String()\n address_format = graphene.String()\n address_latin_format = graphene.String()\n allowed_fields = graphene.List(graphene.String)\n required_fields = graphene.List(graphene.String)\n upper_fields = graphene.List(graphene.String)\n country_area_type = graphene.String()\n country_area_choices = graphene.List(ChoiceValue)\n city_type = graphene.String()\n city_area_choices = graphene.List(ChoiceValue)\n postal_code_type = graphene.String()\n postal_code_matchers = graphene.List(graphene.String)\n postal_code_examples = graphene.List(graphene.String)\n postal_code_prefix = graphene.String()\n", "path": "saleor/graphql/account/types.py"}]}
2,020
341
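Once `date_joined` and `last_login` leave the exclude list, graphene exposes them under its default camelCase conversion, so API clients can query `dateJoined` and `lastLogin` on the `User` type. A small check of that name mapping (assuming graphene's stock `auto_camelcase` behaviour and its public `to_camel_case` helper):

```python
from graphene.utils.str_converters import to_camel_case

for field in ("date_joined", "last_login"):
    print(field, "->", to_camel_case(field))
# date_joined -> dateJoined
# last_login -> lastLogin
```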
gh_patches_debug_25
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-543
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PERF: `to_set` speeds up especial for large data <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [ ] closes #xxxx - [x] whatsnew entry | data | `set(s)` | `set(s.unique())` | | -------------------- | ---------------- | ----------------- | | small, `list(range(10)` | 1.83 µs ± 31.6 ns | 1.17 ms ± 144 µs | | large, `list(range(10)*1000` | 9.67 µs ± 564 ns | 255 µs ± 14.9 µs | </issue> <code> [start of dtoolkit/accessor/index/to_set.py] 1 import pandas as pd 2 3 from dtoolkit.accessor.register import register_index_method 4 5 6 @register_index_method 7 def to_set(index: pd.Index) -> set: 8 """ 9 Return a :keyword:`set` of the values. 10 11 A sugary syntax wraps :keyword:`set`:: 12 13 set(index) 14 15 Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`. 16 17 Returns 18 ------- 19 set 20 21 See Also 22 -------- 23 pandas.Index.unique 24 25 Examples 26 -------- 27 >>> import dtoolkit.accessor 28 >>> import pandas as pd 29 >>> i = pd.Index([1, 2, 2]) 30 >>> i 31 Int64Index([1, 2, 2], dtype='int64') 32 >>> i.to_set() 33 {1, 2} 34 """ 35 36 return set(index) 37 [end of dtoolkit/accessor/index/to_set.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/accessor/index/to_set.py b/dtoolkit/accessor/index/to_set.py --- a/dtoolkit/accessor/index/to_set.py +++ b/dtoolkit/accessor/index/to_set.py @@ -33,4 +33,4 @@ {1, 2} """ - return set(index) + return set(index.unique())
{"golden_diff": "diff --git a/dtoolkit/accessor/index/to_set.py b/dtoolkit/accessor/index/to_set.py\n--- a/dtoolkit/accessor/index/to_set.py\n+++ b/dtoolkit/accessor/index/to_set.py\n@@ -33,4 +33,4 @@\n {1, 2}\n \"\"\"\n \n- return set(index)\n+ return set(index.unique())\n", "issue": "PERF: `to_set` speeds up especial for large data\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\n\r\n| data | `set(s)` | `set(s.unique())` |\r\n| -------------------- | ---------------- | ----------------- |\r\n| small, `list(range(10)` | 1.83 \u00b5s \u00b1 31.6 ns | 1.17 ms \u00b1 144 \u00b5s |\r\n| large, `list(range(10)*1000` | 9.67 \u00b5s \u00b1 564 ns | 255 \u00b5s \u00b1 14.9 \u00b5s |\r\n\n", "before_files": [{"content": "import pandas as pd\n\nfrom dtoolkit.accessor.register import register_index_method\n\n\n@register_index_method\ndef to_set(index: pd.Index) -> set:\n \"\"\"\n Return a :keyword:`set` of the values.\n\n A sugary syntax wraps :keyword:`set`::\n\n set(index)\n\n Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`.\n\n Returns\n -------\n set\n\n See Also\n --------\n pandas.Index.unique\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> i = pd.Index([1, 2, 2])\n >>> i\n Int64Index([1, 2, 2], dtype='int64')\n >>> i.to_set()\n {1, 2}\n \"\"\"\n\n return set(index)\n", "path": "dtoolkit/accessor/index/to_set.py"}]}
1,107
84
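A small sketch of the behavioural contract behind the `to_set` patch in the record above, assuming pandas is installed. Which variant is faster depends on the data, as the benchmark table in the issue shows, but the result is identical:

```python
import pandas as pd

# Index with heavy duplication, similar to the issue's "large" benchmark case
i = pd.Index(list(range(10)) * 1000)

before = set(i)            # original implementation: set(index)
after = set(i.unique())    # patched implementation: set(index.unique())

assert before == after == set(range(10))
```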
gh_patches_debug_13395
rasdani/github-patches
git_diff
facebookresearch__xformers-57
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [bug] Failing triton dropout test # 🐛 Bug See https://app.circleci.com/pipelines/github/facebookresearch/xformers/212/workflows/8988c71c-84f5-4bd0-bd59-ac7d293c2370/jobs/398 Not sure why this happens just now, looking into that ## Command can repro locally with ` pytest tests -k dropout -x -v ` </issue> <code> [start of xformers/triton/k_dropout.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 7 # CREDITS: This comes almost as-is from the Triton dropout tutorial 8 # https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py 9 10 import triton 11 import triton.language as tl 12 13 14 # fmt: off 15 @triton.autotune( 16 configs=[ 17 triton.Config({"BLOCK_SIZE" : 256}, num_warps=1), 18 triton.Config({"BLOCK_SIZE" : 512}, num_warps=2), 19 triton.Config({"BLOCK_SIZE" : 1024}, num_warps=4), 20 triton.Config({"BLOCK_SIZE" : 2048}, num_warps=8), 21 triton.Config({"BLOCK_SIZE" : 4096}, num_warps=8), 22 ], 23 key=["N"], 24 ) 25 @triton.jit 26 def k_dropout( 27 Y, X, S, 28 stride, 29 N, 30 p, 31 **meta, 32 ): 33 """ 34 Apply dropout on an input tensor 35 Y : Output (M, N) 36 X : Input (M, N) 37 S : Seeds (M,) 38 p : dropout probability 39 """ 40 # fmt: on 41 42 # compute memory offsets of elements handled by this instance 43 BLOCK_SIZE = meta["BLOCK_SIZE"] 44 row = tl.program_id(axis=0) 45 col = tl.program_id(axis=1) 46 offsets = row * stride + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) 47 mask = col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) < N 48 49 # load data from x 50 x_ptrs = X + offsets 51 x = tl.load(x_ptrs, mask=mask) 52 53 # randomly prune it 54 seed = S + row 55 random = tl.rand(seed.to(tl.int32), offsets) 56 x_keep = random > p 57 58 # write-back 59 zero = 0. 60 zero = zero.to(x.dtype) 61 output = tl.where(x_keep, (x / (1 - p)).to(x.dtype), zero) 62 y_ptrs = Y + offsets 63 tl.store(y_ptrs, output, mask=mask) 64 [end of xformers/triton/k_dropout.py] [start of xformers/triton/dropout.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 
5 6 7 # CREDITS: This comes almost as-is from the Triton dropout tutorial 8 # https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py 9 10 import torch 11 import triton 12 from torch.cuda.amp import custom_bwd, custom_fwd 13 14 from xformers.triton.k_dropout import k_dropout 15 16 17 # Helper to handle the SPMD launch grid and error cases 18 class _dropout(torch.autograd.Function): 19 @staticmethod 20 @custom_fwd(cast_inputs=torch.float16) 21 def forward(ctx, x, p): 22 # Soft-flatten an hypothetical 3rd dimension 23 x_ = x.reshape(-1, x.shape[-1]) 24 y = torch.empty_like(x_) 25 _, N = x_.shape 26 27 assert y.stride(-1) == 1 and x_.stride(-1) == 1 28 29 # Generate one seed per sample 30 # seed max is int32 max for positive numbers: 2**16 31 seeds = torch.randint(65536, (x_.shape[0],), device=x.device).to(torch.int32) 32 33 # SPMD launch grid 34 def grid(meta): 35 return ( 36 x_.shape[0], 37 triton.cdiv(x_.shape[1], meta["BLOCK_SIZE"]), 38 ) 39 40 k_dropout[grid](y, x_, seeds, y.stride(0), N, p) 41 42 ctx.save_for_backward(seeds) 43 ctx.p = p 44 45 return y.reshape_as(x) 46 47 @staticmethod 48 @custom_bwd 49 def backward(ctx, grad_out): 50 (seeds,) = ctx.saved_tensors 51 52 # Soft-flatten an hypothetical 3rd dimension 53 grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]) 54 grad_in = torch.empty_like(grad_out_) 55 _, N = grad_out_.shape 56 57 assert grad_in.stride(-1) == 1 and grad_out_.stride(-1) == 1 58 59 # SPMD launch grid 60 def grid(meta): 61 return ( 62 grad_out_.shape[0], 63 triton.cdiv(grad_out_.shape[1], meta["BLOCK_SIZE"]), 64 ) 65 66 k_dropout[grid](grad_in, grad_out_, seeds, grad_out_.stride(0), N, ctx.p) 67 68 return grad_in.reshape_as(grad_out), None 69 70 71 def dropout(x: torch.Tensor, p: float): 72 return _dropout.apply(x, p) 73 [end of xformers/triton/dropout.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xformers/triton/dropout.py b/xformers/triton/dropout.py --- a/xformers/triton/dropout.py +++ b/xformers/triton/dropout.py @@ -69,4 +69,7 @@ def dropout(x: torch.Tensor, p: float): - return _dropout.apply(x, p) + if p > 0.0: + return _dropout.apply(x, p) + + return x diff --git a/xformers/triton/k_dropout.py b/xformers/triton/k_dropout.py --- a/xformers/triton/k_dropout.py +++ b/xformers/triton/k_dropout.py @@ -24,7 +24,7 @@ ) @triton.jit def k_dropout( - Y, X, S, + Y, X, SEEDS, stride, N, p, @@ -51,7 +51,7 @@ x = tl.load(x_ptrs, mask=mask) # randomly prune it - seed = S + row + seed = SEEDS + row random = tl.rand(seed.to(tl.int32), offsets) x_keep = random > p
{"golden_diff": "diff --git a/xformers/triton/dropout.py b/xformers/triton/dropout.py\n--- a/xformers/triton/dropout.py\n+++ b/xformers/triton/dropout.py\n@@ -69,4 +69,7 @@\n \n \n def dropout(x: torch.Tensor, p: float):\n- return _dropout.apply(x, p)\n+ if p > 0.0:\n+ return _dropout.apply(x, p)\n+\n+ return x\ndiff --git a/xformers/triton/k_dropout.py b/xformers/triton/k_dropout.py\n--- a/xformers/triton/k_dropout.py\n+++ b/xformers/triton/k_dropout.py\n@@ -24,7 +24,7 @@\n )\n @triton.jit\n def k_dropout(\n- Y, X, S,\n+ Y, X, SEEDS,\n stride,\n N,\n p,\n@@ -51,7 +51,7 @@\n x = tl.load(x_ptrs, mask=mask)\n \n # randomly prune it\n- seed = S + row\n+ seed = SEEDS + row\n random = tl.rand(seed.to(tl.int32), offsets)\n x_keep = random > p\n", "issue": "[bug] Failing triton dropout test \n# \ud83d\udc1b Bug\r\n\r\nSee https://app.circleci.com/pipelines/github/facebookresearch/xformers/212/workflows/8988c71c-84f5-4bd0-bd59-ac7d293c2370/jobs/398\r\n\r\nNot sure why this happens just now, looking into that\r\n\r\n## Command\r\ncan repro locally with ` pytest tests -k dropout -x -v `\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This comes almost as-is from the Triton dropout tutorial\n# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py\n\nimport triton\nimport triton.language as tl\n\n\n# fmt: off\[email protected](\n configs=[\n triton.Config({\"BLOCK_SIZE\" : 256}, num_warps=1),\n triton.Config({\"BLOCK_SIZE\" : 512}, num_warps=2),\n triton.Config({\"BLOCK_SIZE\" : 1024}, num_warps=4),\n triton.Config({\"BLOCK_SIZE\" : 2048}, num_warps=8),\n triton.Config({\"BLOCK_SIZE\" : 4096}, num_warps=8),\n ],\n key=[\"N\"],\n)\[email protected]\ndef k_dropout(\n Y, X, S,\n stride,\n N,\n p,\n **meta,\n):\n \"\"\"\n Apply dropout on an input tensor\n Y : Output (M, N)\n X : Input (M, N)\n S : Seeds (M,)\n p : dropout probability\n \"\"\"\n # fmt: on\n\n # compute memory offsets of elements handled by this instance\n BLOCK_SIZE = meta[\"BLOCK_SIZE\"]\n row = tl.program_id(axis=0)\n col = tl.program_id(axis=1)\n offsets = row * stride + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) < N\n\n # load data from x\n x_ptrs = X + offsets\n x = tl.load(x_ptrs, mask=mask)\n\n # randomly prune it\n seed = S + row\n random = tl.rand(seed.to(tl.int32), offsets)\n x_keep = random > p\n\n # write-back\n zero = 0.\n zero = zero.to(x.dtype)\n output = tl.where(x_keep, (x / (1 - p)).to(x.dtype), zero)\n y_ptrs = Y + offsets\n tl.store(y_ptrs, output, mask=mask)\n", "path": "xformers/triton/k_dropout.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This comes almost as-is from the Triton dropout tutorial\n# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py\n\nimport torch\nimport triton\nfrom torch.cuda.amp import custom_bwd, custom_fwd\n\nfrom xformers.triton.k_dropout import k_dropout\n\n\n# Helper to handle the SPMD launch grid and error cases\nclass _dropout(torch.autograd.Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float16)\n def forward(ctx, x, p):\n # Soft-flatten an hypothetical 3rd dimension\n x_ = x.reshape(-1, x.shape[-1])\n y = torch.empty_like(x_)\n _, N = x_.shape\n\n assert y.stride(-1) == 1 and x_.stride(-1) == 1\n\n # Generate one seed per sample\n # seed max is int32 max for positive numbers: 2**16\n seeds = torch.randint(65536, (x_.shape[0],), device=x.device).to(torch.int32)\n\n # SPMD launch grid\n def grid(meta):\n return (\n x_.shape[0],\n triton.cdiv(x_.shape[1], meta[\"BLOCK_SIZE\"]),\n )\n\n k_dropout[grid](y, x_, seeds, y.stride(0), N, p)\n\n ctx.save_for_backward(seeds)\n ctx.p = p\n\n return y.reshape_as(x)\n\n @staticmethod\n @custom_bwd\n def backward(ctx, grad_out):\n (seeds,) = ctx.saved_tensors\n\n # Soft-flatten an hypothetical 3rd dimension\n grad_out_ = grad_out.reshape(-1, grad_out.shape[-1])\n grad_in = torch.empty_like(grad_out_)\n _, N = grad_out_.shape\n\n assert grad_in.stride(-1) == 1 and grad_out_.stride(-1) == 1\n\n # SPMD launch grid\n def grid(meta):\n return (\n grad_out_.shape[0],\n triton.cdiv(grad_out_.shape[1], meta[\"BLOCK_SIZE\"]),\n )\n\n k_dropout[grid](grad_in, grad_out_, seeds, grad_out_.stride(0), N, ctx.p)\n\n return grad_in.reshape_as(grad_out), None\n\n\ndef dropout(x: torch.Tensor, p: float):\n return _dropout.apply(x, p)\n", "path": "xformers/triton/dropout.py"}]}
2,027
275
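For context on the xformers record above, a hypothetical usage sketch of the patched wrapper. It assumes a CUDA device and the `dropout` function defined in the record's `xformers/triton/dropout.py`; the `p == 0.0` behaviour is the early return added by the fix:

```python
import torch

from xformers.triton.dropout import dropout  # path taken from the record

x = torch.randn(4, 8, device="cuda", dtype=torch.float16)

y = dropout(x, p=0.0)  # with the patch: returned unchanged, no Triton kernel launch
z = dropout(x, p=0.1)  # roughly 10% of entries zeroed, survivors scaled by 1 / (1 - p)
```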
gh_patches_debug_25915
rasdani/github-patches
git_diff
microsoft__AzureTRE-1653
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Health check endpoint should log all the service status as it queries Currently the `/health` endpoint queries Cosmos / Service Bus / the RP - and returns the statuses. If any are not ok, the response is a 503. There is currently no way to query that endpoint when the gateway has blocked access - so we at least need it to log the results so we can track back and see what service was down, when. </issue> <code> [start of api_app/api/routes/health.py] 1 from fastapi import APIRouter 2 from models.schemas.status import HealthCheck, ServiceStatus, StatusEnum 3 from resources import strings 4 from services.health_checker import create_resource_processor_status, create_state_store_status, create_service_bus_status 5 from fastapi import HTTPException, status 6 7 router = APIRouter() 8 9 10 @router.get("/health", name=strings.API_GET_HEALTH_STATUS) 11 async def health_check() -> HealthCheck: 12 cosmos_status, cosmos_message = create_state_store_status() 13 sb_status, sb_message = await create_service_bus_status() 14 rp_status, rp_message = create_resource_processor_status() 15 services = [ServiceStatus(service=strings.COSMOS_DB, status=cosmos_status, message=cosmos_message), 16 ServiceStatus(service=strings.SERVICE_BUS, status=sb_status, message=sb_message), 17 ServiceStatus(service=strings.RESOURCE_PROCESSOR, status=rp_status, message=rp_message)] 18 health_check_result = HealthCheck(services=services) 19 if cosmos_status == StatusEnum.not_ok or sb_status == StatusEnum.not_ok or rp_status == StatusEnum.not_ok: 20 raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=health_check_result.json()) 21 return health_check_result 22 [end of api_app/api/routes/health.py] [start of api_app/_version.py] 1 __version__ = "0.2.10" 2 [end of api_app/_version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/api_app/_version.py b/api_app/_version.py --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.2.10" +__version__ = "0.2.11" diff --git a/api_app/api/routes/health.py b/api_app/api/routes/health.py --- a/api_app/api/routes/health.py +++ b/api_app/api/routes/health.py @@ -3,6 +3,7 @@ from resources import strings from services.health_checker import create_resource_processor_status, create_state_store_status, create_service_bus_status from fastapi import HTTPException, status +import logging router = APIRouter() @@ -17,5 +18,8 @@ ServiceStatus(service=strings.RESOURCE_PROCESSOR, status=rp_status, message=rp_message)] health_check_result = HealthCheck(services=services) if cosmos_status == StatusEnum.not_ok or sb_status == StatusEnum.not_ok or rp_status == StatusEnum.not_ok: + logging.error(f'Cosmos Status: {cosmos_status}, message: {cosmos_message}') + logging.error(f'Service Bus Status: {sb_status}, message: {sb_message}') + logging.error(f'Resource Processor Status: {rp_status}, message: {rp_message}') raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=health_check_result.json()) return health_check_result
{"golden_diff": "diff --git a/api_app/_version.py b/api_app/_version.py\n--- a/api_app/_version.py\n+++ b/api_app/_version.py\n@@ -1 +1 @@\n-__version__ = \"0.2.10\"\n+__version__ = \"0.2.11\"\ndiff --git a/api_app/api/routes/health.py b/api_app/api/routes/health.py\n--- a/api_app/api/routes/health.py\n+++ b/api_app/api/routes/health.py\n@@ -3,6 +3,7 @@\n from resources import strings\n from services.health_checker import create_resource_processor_status, create_state_store_status, create_service_bus_status\n from fastapi import HTTPException, status\n+import logging\n \n router = APIRouter()\n \n@@ -17,5 +18,8 @@\n ServiceStatus(service=strings.RESOURCE_PROCESSOR, status=rp_status, message=rp_message)]\n health_check_result = HealthCheck(services=services)\n if cosmos_status == StatusEnum.not_ok or sb_status == StatusEnum.not_ok or rp_status == StatusEnum.not_ok:\n+ logging.error(f'Cosmos Status: {cosmos_status}, message: {cosmos_message}')\n+ logging.error(f'Service Bus Status: {sb_status}, message: {sb_message}')\n+ logging.error(f'Resource Processor Status: {rp_status}, message: {rp_message}')\n raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=health_check_result.json())\n return health_check_result\n", "issue": "Health check endpoint should log all the service status as it queries\nCurrently the `/health` endpoint queries Cosmos / Service Bus / the RP - and returns the statuses. If any are not ok, the response is a 503.\r\n\r\nThere is currently no way to query that endpoint when the gateway has blocked access - so we at least need it to log the results so we can track back and see what service was down, when.\n", "before_files": [{"content": "from fastapi import APIRouter\nfrom models.schemas.status import HealthCheck, ServiceStatus, StatusEnum\nfrom resources import strings\nfrom services.health_checker import create_resource_processor_status, create_state_store_status, create_service_bus_status\nfrom fastapi import HTTPException, status\n\nrouter = APIRouter()\n\n\[email protected](\"/health\", name=strings.API_GET_HEALTH_STATUS)\nasync def health_check() -> HealthCheck:\n cosmos_status, cosmos_message = create_state_store_status()\n sb_status, sb_message = await create_service_bus_status()\n rp_status, rp_message = create_resource_processor_status()\n services = [ServiceStatus(service=strings.COSMOS_DB, status=cosmos_status, message=cosmos_message),\n ServiceStatus(service=strings.SERVICE_BUS, status=sb_status, message=sb_message),\n ServiceStatus(service=strings.RESOURCE_PROCESSOR, status=rp_status, message=rp_message)]\n health_check_result = HealthCheck(services=services)\n if cosmos_status == StatusEnum.not_ok or sb_status == StatusEnum.not_ok or rp_status == StatusEnum.not_ok:\n raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=health_check_result.json())\n return health_check_result\n", "path": "api_app/api/routes/health.py"}, {"content": "__version__ = \"0.2.10\"\n", "path": "api_app/_version.py"}]}
953
321
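A generalized sketch of the pattern the AzureTRE patch above applies, namely logging every dependency status before raising the 503. The helper name and the status shape are illustrative, not taken from the repository:

```python
import logging

from fastapi import HTTPException, status


def raise_503_with_logged_statuses(statuses: dict) -> None:
    # statuses maps service name -> (status, message), mirroring the ServiceStatus list
    if any(st != "OK" for st, _ in statuses.values()):
        for name, (st, msg) in statuses.items():
            # Log each service so an outage can be traced even when the endpoint is unreachable
            logging.error(f"{name} Status: {st}, message: {msg}")
        raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE)
```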
gh_patches_debug_26067
rasdani/github-patches
git_diff
beeware__toga-543
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> A single column table leads to only showing first letter of value ## Expected Behavior Table like this: **Filename** xxx yyy zzz ## Current Behavior **Filename** x y z ## Steps to reproduce Use toga.Table(headings=['Filename'], data=['xxx', 'yyy', 'zzz'], on_select=self.handle_name_select) ## Analysis The problem seems to be in the ListSource class. `def _create_row(self, data): if isinstance(data, dict): row = Row(**data) else: row = Row(**dict(zip(self._accessors, data))) row._source = self return row' In list_source.py line 56 it says: `row = Row(**dict(zip(self._accessors, data)))` but the data parameter is a string when using a list of strings as data, leading to the zipping of the individual characters. When passing in the data as [('xxx',), ('yyy',), ('zzz',)] the error does not occur. So either the API should make it explicit that it expects a list of lists, or handle the data-is-a-list-of-strings case correctly </issue> <code> [start of src/core/toga/sources/list_source.py] 1 from .base import Source 2 3 4 class Row: 5 def __init__(self, **data): 6 self._attrs = list(data.keys()) 7 self._source = None 8 for name, value in data.items(): 9 setattr(self, name, value) 10 11 ###################################################################### 12 # Utility wrappers 13 ###################################################################### 14 15 def __setattr__(self, attr, value): 16 super().__setattr__(attr, value) 17 if attr in self._attrs: 18 if self._source is not None: 19 self._source._notify('change', item=self) 20 21 22 class ListSource(Source): 23 """A data source to store a list of multiple data values, in a row-like fashion. 24 25 Args: 26 data (`list`): The data in the list. Each entry in the list should have the 27 same number of entries as there are accessors. 28 accessors (`list`): A list of attribute names for accessing the value 29 in each column of the row. 30 """ 31 def __init__(self, data, accessors): 32 super().__init__() 33 self._accessors = accessors 34 self._data = [] 35 for value in data: 36 self._data.append(self._create_row(value)) 37 38 ###################################################################### 39 # Methods required by the ListSource interface 40 ###################################################################### 41 42 def __len__(self): 43 return len(self._data) 44 45 def __getitem__(self, index): 46 return self._data[index] 47 48 ###################################################################### 49 # Factory methods for new rows 50 ###################################################################### 51 52 def _create_row(self, data): 53 if isinstance(data, dict): 54 row = Row(**data) 55 else: 56 row = Row(**dict(zip(self._accessors, data))) 57 row._source = self 58 return row 59 60 ###################################################################### 61 # Utility methods to make ListSources more list-like 62 ###################################################################### 63 64 def __setitem__(self, index, value): 65 row = self._create_row(value) 66 self._data[index] = row 67 self._notify('insert', index=index, item=row) 68 69 def __iter__(self): 70 return iter(self._data) 71 72 def clear(self): 73 self._data = [] 74 self._notify('clear') 75 76 def insert(self, index, *values, **named): 77 # Coalesce values and data into a single data dictionary, 78 # and use that to create the data row. Explicitly named data override. 
79 row = self._create_row(dict(zip(self._accessors, values), **named)) 80 self._data.insert(index, row) 81 self._notify('insert', index=index, item=row) 82 return row 83 84 def prepend(self, *values, **named): 85 return self.insert(0, *values, **named) 86 87 def append(self, *values, **named): 88 return self.insert(len(self), *values, **named) 89 90 def remove(self, row): 91 self._data.remove(row) 92 self._notify('remove', item=row) 93 return row 94 95 def index(self, row): 96 return self._data.index(row) 97 [end of src/core/toga/sources/list_source.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/core/toga/sources/list_source.py b/src/core/toga/sources/list_source.py --- a/src/core/toga/sources/list_source.py +++ b/src/core/toga/sources/list_source.py @@ -24,7 +24,7 @@ Args: data (`list`): The data in the list. Each entry in the list should have the - same number of entries as there are accessors. + same number of entries as there are accessors. accessors (`list`): A list of attribute names for accessing the value in each column of the row. """ @@ -50,10 +50,20 @@ ###################################################################### def _create_row(self, data): + """Create a Row object from the given data. + Args: + data (any): The type of `data` determines how it is handled + ``dict``: each key corresponds to a column accessor + iterables, except ``str`` and ``dict``: each item corresponds to a column + all else: `data` will fill the first column + """ + if isinstance(data, dict): row = Row(**data) - else: + elif hasattr(data, '__iter__') and not isinstance(data, str): row = Row(**dict(zip(self._accessors, data))) + else: + row = Row(**{self._accessors[0]: data}) row._source = self return row
{"golden_diff": "diff --git a/src/core/toga/sources/list_source.py b/src/core/toga/sources/list_source.py\n--- a/src/core/toga/sources/list_source.py\n+++ b/src/core/toga/sources/list_source.py\n@@ -24,7 +24,7 @@\n \n Args:\n data (`list`): The data in the list. Each entry in the list should have the\n- same number of entries as there are accessors.\n+ same number of entries as there are accessors. \n accessors (`list`): A list of attribute names for accessing the value\n in each column of the row.\n \"\"\"\n@@ -50,10 +50,20 @@\n ######################################################################\n \n def _create_row(self, data):\n+ \"\"\"Create a Row object from the given data.\n+ Args:\n+ data (any): The type of `data` determines how it is handled\n+ ``dict``: each key corresponds to a column accessor\n+ iterables, except ``str`` and ``dict``: each item corresponds to a column\n+ all else: `data` will fill the first column\n+ \"\"\"\n+\n if isinstance(data, dict):\n row = Row(**data)\n- else:\n+ elif hasattr(data, '__iter__') and not isinstance(data, str):\n row = Row(**dict(zip(self._accessors, data)))\n+ else:\n+ row = Row(**{self._accessors[0]: data})\n row._source = self\n return row\n", "issue": "A single column table leads to only showing first letter of value\n## Expected Behavior\r\n\r\nTable like this:\r\n**Filename**\r\nxxx\r\nyyy\r\nzzz\r\n\r\n## Current Behavior\r\n**Filename**\r\nx\r\ny\r\nz\r\n\r\n## Steps to reproduce\r\n\r\nUse toga.Table(headings=['Filename'], data=['xxx', 'yyy', 'zzz'], on_select=self.handle_name_select)\r\n\r\n## Analysis\r\nThe problem seems to be in the ListSource class.\r\n`def _create_row(self, data):\r\n if isinstance(data, dict):\r\n row = Row(**data)\r\n else:\r\n row = Row(**dict(zip(self._accessors, data)))\r\n row._source = self\r\n return row'\r\n\r\nIn list_source.py line 56 it says:\r\n\r\n`row = Row(**dict(zip(self._accessors, data)))`\r\n\r\nbut the data parameter is a string when using a list of strings as data, leading to the zipping of the individual characters. When passing in the data as [('xxx',), ('yyy',), ('zzz',)] the error does not occur.\r\n\r\nSo either the API should make it explicit that it expects a list of lists, or handle the data-is-a-list-of-strings case correctly\n", "before_files": [{"content": "from .base import Source\n\n\nclass Row:\n def __init__(self, **data):\n self._attrs = list(data.keys())\n self._source = None\n for name, value in data.items():\n setattr(self, name, value)\n\n ######################################################################\n # Utility wrappers\n ######################################################################\n\n def __setattr__(self, attr, value):\n super().__setattr__(attr, value)\n if attr in self._attrs:\n if self._source is not None:\n self._source._notify('change', item=self)\n\n\nclass ListSource(Source):\n \"\"\"A data source to store a list of multiple data values, in a row-like fashion.\n\n Args:\n data (`list`): The data in the list. 
Each entry in the list should have the\n same number of entries as there are accessors.\n accessors (`list`): A list of attribute names for accessing the value\n in each column of the row.\n \"\"\"\n def __init__(self, data, accessors):\n super().__init__()\n self._accessors = accessors\n self._data = []\n for value in data:\n self._data.append(self._create_row(value))\n\n ######################################################################\n # Methods required by the ListSource interface\n ######################################################################\n\n def __len__(self):\n return len(self._data)\n\n def __getitem__(self, index):\n return self._data[index]\n\n ######################################################################\n # Factory methods for new rows\n ######################################################################\n\n def _create_row(self, data):\n if isinstance(data, dict):\n row = Row(**data)\n else:\n row = Row(**dict(zip(self._accessors, data)))\n row._source = self\n return row\n\n ######################################################################\n # Utility methods to make ListSources more list-like\n ######################################################################\n\n def __setitem__(self, index, value):\n row = self._create_row(value)\n self._data[index] = row\n self._notify('insert', index=index, item=row)\n\n def __iter__(self):\n return iter(self._data)\n\n def clear(self):\n self._data = []\n self._notify('clear')\n\n def insert(self, index, *values, **named):\n # Coalesce values and data into a single data dictionary,\n # and use that to create the data row. Explicitly named data override.\n row = self._create_row(dict(zip(self._accessors, values), **named))\n self._data.insert(index, row)\n self._notify('insert', index=index, item=row)\n return row\n\n def prepend(self, *values, **named):\n return self.insert(0, *values, **named)\n\n def append(self, *values, **named):\n return self.insert(len(self), *values, **named)\n\n def remove(self, row):\n self._data.remove(row)\n self._notify('remove', item=row)\n return row\n\n def index(self, row):\n return self._data.index(row)\n", "path": "src/core/toga/sources/list_source.py"}]}
1,627
328
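The toga record's analysis hinges on how `zip` treats a plain string; a minimal reproduction of the before and after behaviour of `_create_row`, using only the standard library:

```python
accessors = ["filename"]

# Original behaviour: a string is iterated character by character,
# so the accessor gets paired with just the first letter.
assert dict(zip(accessors, "xxx")) == {"filename": "x"}

# Wrapping the value in a one-element tuple avoids the problem,
# which is why [('xxx',), ('yyy',), ('zzz',)] worked in the issue.
assert dict(zip(accessors, ("xxx",))) == {"filename": "xxx"}

# The patched code special-cases strings and non-iterable data instead:
assert {accessors[0]: "xxx"} == {"filename": "xxx"}
```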
gh_patches_debug_37784
rasdani/github-patches
git_diff
bokeh__bokeh-8738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Windows phantomjs not killed on selenium termination I reinstalled a fresh python environment on windows with python 3.7 and pythonenv I use only pip for package dependencies When I tried to run tests some failed because temp files were locked. <img width="726" alt="win32error" src="https://user-images.githubusercontent.com/18531147/54091987-214f4580-4387-11e9-9584-6a117a356ad2.png"> <img width="257" alt="test_failures" src="https://user-images.githubusercontent.com/18531147/54091989-24e2cc80-4387-11e9-9c42-3573dabd1813.PNG"> When driver terminate phantomjs is not correctly killed: <img width="294" alt="proc_pantomjs" src="https://user-images.githubusercontent.com/18531147/54092002-45128b80-4387-11e9-9967-bf74b1e41bd7.PNG"> </issue> <code> [start of bokeh/io/webdriver.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. 3 # All rights reserved. 4 # 5 # The full license is in the file LICENSE.txt, distributed with this software. 6 #----------------------------------------------------------------------------- 7 ''' 8 9 ''' 10 11 #----------------------------------------------------------------------------- 12 # Boilerplate 13 #----------------------------------------------------------------------------- 14 from __future__ import absolute_import, division, print_function, unicode_literals 15 16 import logging 17 log = logging.getLogger(__name__) 18 19 #----------------------------------------------------------------------------- 20 # Imports 21 #----------------------------------------------------------------------------- 22 23 # Standard library imports 24 import atexit 25 import signal 26 import warnings 27 from os.path import devnull 28 29 # External imports 30 31 # Bokeh imports 32 from ..util.dependencies import import_required, detect_phantomjs 33 34 #----------------------------------------------------------------------------- 35 # Globals and constants 36 #----------------------------------------------------------------------------- 37 38 __all__ = ( 39 'create_phantomjs_webdriver', 40 'terminate_webdriver', 41 'webdriver_control', 42 ) 43 44 #----------------------------------------------------------------------------- 45 # General API 46 #----------------------------------------------------------------------------- 47 48 #----------------------------------------------------------------------------- 49 # Dev API 50 #----------------------------------------------------------------------------- 51 52 def create_phantomjs_webdriver(): 53 with warnings.catch_warnings(): 54 warnings.filterwarnings("ignore", ".*", UserWarning, "selenium.webdriver.phantomjs.webdriver") 55 56 webdriver = import_required('selenium.webdriver', 57 'To use bokeh.io image export functions you need selenium ' + 58 '("conda install -c bokeh selenium" or "pip install selenium")') 59 60 phantomjs_path = detect_phantomjs() 61 return webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=devnull) 62 63 def terminate_webdriver(driver): 64 if driver.name == "phantomjs": 65 # https://github.com/seleniumhq/selenium/issues/767 66 if driver.service.process: 67 driver.service.process.send_signal(signal.SIGTERM) 68 69 try: 70 driver.quit() 71 except (IOError, OSError): # IOError for Python 2.7 72 pass 73 74 #----------------------------------------------------------------------------- 75 # Private API 76 
#----------------------------------------------------------------------------- 77 78 class _WebdriverState(object): 79 ''' 80 81 ''' 82 83 def __init__(self, reuse=True, kind="phantomjs"): 84 self.reuse = reuse 85 self.kind = kind 86 self.current = None 87 88 def reset(self): 89 if self.current is not None: 90 terminate_webdriver(self.current) 91 self.current = None 92 93 def get(self): 94 if not self.reuse or self.current is None: 95 if self.current is not None: 96 terminate_webdriver(self.current) 97 self.current = self.create() 98 return self.current 99 100 def create(self): 101 if self.kind == "phantomjs": 102 return create_phantomjs_webdriver() 103 raise ValueError("Unknown webdriver kind %r" % self.kind) 104 105 @property 106 def reuse(self): 107 return self._reuse 108 109 @reuse.setter 110 def reuse(self, value): 111 self._reuse = value 112 113 @property 114 def kind(self): 115 return self._kind 116 117 @kind.setter 118 def kind(self, value): 119 # TODO (bev) enum/value check when more are added 120 self._kind = value 121 122 #----------------------------------------------------------------------------- 123 # Code 124 #----------------------------------------------------------------------------- 125 126 127 webdriver_control = _WebdriverState() 128 129 atexit.register(lambda: webdriver_control.reset()) 130 [end of bokeh/io/webdriver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/io/webdriver.py b/bokeh/io/webdriver.py --- a/bokeh/io/webdriver.py +++ b/bokeh/io/webdriver.py @@ -21,6 +21,7 @@ #----------------------------------------------------------------------------- # Standard library imports +import sys import atexit import signal import warnings @@ -29,7 +30,7 @@ # External imports # Bokeh imports -from ..util.dependencies import import_required, detect_phantomjs +from ..util.dependencies import import_required, detect_phantomjs, import_optional #----------------------------------------------------------------------------- # Globals and constants @@ -49,6 +50,20 @@ # Dev API #----------------------------------------------------------------------------- + +def kill_proc_tree(pid, including_parent=True): + psutil = import_optional('psutil') + if psutil is not None: + parent = psutil.Process(pid) + children = parent.children(recursive=True) + for child in children: + child.kill() + psutil.wait_procs(children) + if including_parent: + parent.kill() + parent.wait(5) + + def create_phantomjs_webdriver(): with warnings.catch_warnings(): warnings.filterwarnings("ignore", ".*", UserWarning, "selenium.webdriver.phantomjs.webdriver") @@ -60,21 +75,25 @@ phantomjs_path = detect_phantomjs() return webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=devnull) + def terminate_webdriver(driver): if driver.name == "phantomjs": # https://github.com/seleniumhq/selenium/issues/767 if driver.service.process: + if sys.platform == 'win32': + kill_proc_tree(driver.service.process.pid, including_parent=False) driver.service.process.send_signal(signal.SIGTERM) try: driver.quit() - except (IOError, OSError): # IOError for Python 2.7 + except (IOError, OSError): # IOError for Python 2.7 pass #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- + class _WebdriverState(object): '''
{"golden_diff": "diff --git a/bokeh/io/webdriver.py b/bokeh/io/webdriver.py\n--- a/bokeh/io/webdriver.py\n+++ b/bokeh/io/webdriver.py\n@@ -21,6 +21,7 @@\n #-----------------------------------------------------------------------------\n \n # Standard library imports\n+import sys\n import atexit\n import signal\n import warnings\n@@ -29,7 +30,7 @@\n # External imports\n \n # Bokeh imports\n-from ..util.dependencies import import_required, detect_phantomjs\n+from ..util.dependencies import import_required, detect_phantomjs, import_optional\n \n #-----------------------------------------------------------------------------\n # Globals and constants\n@@ -49,6 +50,20 @@\n # Dev API\n #-----------------------------------------------------------------------------\n \n+\n+def kill_proc_tree(pid, including_parent=True):\n+ psutil = import_optional('psutil')\n+ if psutil is not None:\n+ parent = psutil.Process(pid)\n+ children = parent.children(recursive=True)\n+ for child in children:\n+ child.kill()\n+ psutil.wait_procs(children)\n+ if including_parent:\n+ parent.kill()\n+ parent.wait(5)\n+\n+\n def create_phantomjs_webdriver():\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \".*\", UserWarning, \"selenium.webdriver.phantomjs.webdriver\")\n@@ -60,21 +75,25 @@\n phantomjs_path = detect_phantomjs()\n return webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=devnull)\n \n+\n def terminate_webdriver(driver):\n if driver.name == \"phantomjs\":\n # https://github.com/seleniumhq/selenium/issues/767\n if driver.service.process:\n+ if sys.platform == 'win32':\n+ kill_proc_tree(driver.service.process.pid, including_parent=False)\n driver.service.process.send_signal(signal.SIGTERM)\n \n try:\n driver.quit()\n- except (IOError, OSError): # IOError for Python 2.7\n+ except (IOError, OSError): # IOError for Python 2.7\n pass\n \n #-----------------------------------------------------------------------------\n # Private API\n #-----------------------------------------------------------------------------\n \n+\n class _WebdriverState(object):\n '''\n", "issue": "Windows phantomjs not killed on selenium termination\nI reinstalled a fresh python environment on windows with python 3.7 and pythonenv\r\nI use only pip for package dependencies\r\nWhen I tried to run tests some failed because temp files were locked.\r\n<img width=\"726\" alt=\"win32error\" src=\"https://user-images.githubusercontent.com/18531147/54091987-214f4580-4387-11e9-9584-6a117a356ad2.png\">\r\n<img width=\"257\" alt=\"test_failures\" src=\"https://user-images.githubusercontent.com/18531147/54091989-24e2cc80-4387-11e9-9c42-3573dabd1813.PNG\">\r\n\r\n\r\nWhen driver terminate phantomjs is not correctly killed:\r\n<img width=\"294\" alt=\"proc_pantomjs\" src=\"https://user-images.githubusercontent.com/18531147/54092002-45128b80-4387-11e9-9967-bf74b1e41bd7.PNG\">\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport 
logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport atexit\nimport signal\nimport warnings\nfrom os.path import devnull\n\n# External imports\n\n# Bokeh imports\nfrom ..util.dependencies import import_required, detect_phantomjs\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'create_phantomjs_webdriver',\n 'terminate_webdriver',\n 'webdriver_control',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\ndef create_phantomjs_webdriver():\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \".*\", UserWarning, \"selenium.webdriver.phantomjs.webdriver\")\n\n webdriver = import_required('selenium.webdriver',\n 'To use bokeh.io image export functions you need selenium ' +\n '(\"conda install -c bokeh selenium\" or \"pip install selenium\")')\n\n phantomjs_path = detect_phantomjs()\n return webdriver.PhantomJS(executable_path=phantomjs_path, service_log_path=devnull)\n\ndef terminate_webdriver(driver):\n if driver.name == \"phantomjs\":\n # https://github.com/seleniumhq/selenium/issues/767\n if driver.service.process:\n driver.service.process.send_signal(signal.SIGTERM)\n\n try:\n driver.quit()\n except (IOError, OSError): # IOError for Python 2.7\n pass\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\nclass _WebdriverState(object):\n '''\n\n '''\n\n def __init__(self, reuse=True, kind=\"phantomjs\"):\n self.reuse = reuse\n self.kind = kind\n self.current = None\n\n def reset(self):\n if self.current is not None:\n terminate_webdriver(self.current)\n self.current = None\n\n def get(self):\n if not self.reuse or self.current is None:\n if self.current is not None:\n terminate_webdriver(self.current)\n self.current = self.create()\n return self.current\n\n def create(self):\n if self.kind == \"phantomjs\":\n return create_phantomjs_webdriver()\n raise ValueError(\"Unknown webdriver kind %r\" % self.kind)\n\n @property\n def reuse(self):\n return self._reuse\n\n @reuse.setter\n def reuse(self, value):\n self._reuse = value\n\n @property\n def kind(self):\n return self._kind\n\n @kind.setter\n def kind(self, value):\n # TODO (bev) enum/value check when more are added\n self._kind = value\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n\nwebdriver_control = _WebdriverState()\n\natexit.register(lambda: webdriver_control.reset())\n", "path": "bokeh/io/webdriver.py"}]}
1,807
474
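The core of the bokeh fix above is killing the PhantomJS child processes before terminating the selenium service on Windows. A standalone sketch of that helper, assuming `psutil` is available (the actual patch imports it as an optional dependency and only calls it on win32):

```python
import psutil


def kill_proc_tree(pid, including_parent=True):
    # Terminate all descendants of `pid` first, then optionally the parent,
    # so no PhantomJS child is left holding temp files open.
    parent = psutil.Process(pid)
    children = parent.children(recursive=True)
    for child in children:
        child.kill()
    psutil.wait_procs(children)
    if including_parent:
        parent.kill()
        parent.wait(5)
```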
gh_patches_debug_24271
rasdani/github-patches
git_diff
ivy-llc__ivy-15738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ihfft </issue> <code> [start of ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py] 1 import ivy 2 from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back 3 from ivy.func_wrapper import with_unsupported_dtypes 4 5 6 @to_ivy_arrays_and_back 7 def ifft(a, n=None, axis=-1, norm=None): 8 a = ivy.array(a, dtype=ivy.complex128) 9 if norm is None: 10 norm = "backward" 11 return ivy.ifft(a, axis, norm=norm, n=n) 12 13 14 @to_ivy_arrays_and_back 15 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") 16 def ifftshift(x, axes=None): 17 x = ivy.asarray(x) 18 19 if axes is None: 20 axes = tuple(range(x.ndim)) 21 shift = [-(dim // 2) for dim in x.shape] 22 elif isinstance( 23 axes, 24 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)), 25 ): 26 shift = -(x.shape[axes] // 2) 27 else: 28 shift = [-(x.shape[ax] // 2) for ax in axes] 29 30 roll = ivy.roll(x, shift, axis=axes) 31 32 return roll 33 34 35 @to_ivy_arrays_and_back 36 def fft(a, n=None, axis=-1, norm=None): 37 return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n) 38 39 40 @to_ivy_arrays_and_back 41 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") 42 def fftshift(x, axes=None): 43 x = ivy.asarray(x) 44 45 if axes is None: 46 axes = tuple(range(x.ndim)) 47 shift = [(dim // 2) for dim in x.shape] 48 elif isinstance( 49 axes, 50 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)), 51 ): 52 shift = x.shape[axes] // 2 53 else: 54 shift = [(x.shape[ax] // 2) for ax in axes] 55 56 roll = ivy.roll(x, shift, axis=axes) 57 58 return roll 59 60 61 @with_unsupported_dtypes({"1.9.0 and below": ("float16",)}, "torch") 62 @to_ivy_arrays_and_back 63 def rfft(a, n=None, axis=-1, norm=None): 64 if norm is None: 65 norm = "backward" 66 a = ivy.array(a, dtype=ivy.float64) 67 return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm) 68 69 70 @with_unsupported_dtypes({"2.4.2 and below": ("int",)}, "paddle") 71 @to_ivy_arrays_and_back 72 def fftfreq(n, d=1.0): 73 if not isinstance( 74 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64)) 75 ): 76 raise ValueError("n should be an integer") 77 78 N = (n - 1) // 2 + 1 79 val = 1.0 / (n * d) 80 results = ivy.empty(tuple([n]), dtype=int) 81 82 p1 = ivy.arange(0, N, dtype=int) 83 results[:N] = p1 84 p2 = ivy.arange(-(n // 2), 0, dtype=int) 85 results[N:] = p2 86 87 return results * val 88 [end of ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py --- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py +++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py @@ -3,6 +3,23 @@ from ivy.func_wrapper import with_unsupported_dtypes +_SWAP_DIRECTION_MAP = { + None: "forward", + "backward": "forward", + "ortho": "ortho", + "forward": "backward", +} + + +def _swap_direction(norm): + try: + return _SWAP_DIRECTION_MAP[norm] + except KeyError: + raise ValueError( + f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".' + ) from None + + @to_ivy_arrays_and_back def ifft(a, n=None, axis=-1, norm=None): a = ivy.array(a, dtype=ivy.complex128) @@ -67,6 +84,17 @@ return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm) +@to_ivy_arrays_and_back +@with_unsupported_dtypes({"1.12.0 and below": ("float16",)}, "numpy") +def ihfft(a, n=None, axis=-1, norm=None): + a = ivy.array(a, dtype=ivy.float64) + if n is None: + n = a.shape[axis] + norm = _swap_direction(norm) + output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array) + return output + + @with_unsupported_dtypes({"2.4.2 and below": ("int",)}, "paddle") @to_ivy_arrays_and_back def fftfreq(n, d=1.0):
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n@@ -3,6 +3,23 @@\n from ivy.func_wrapper import with_unsupported_dtypes\n \n \n+_SWAP_DIRECTION_MAP = {\n+ None: \"forward\",\n+ \"backward\": \"forward\",\n+ \"ortho\": \"ortho\",\n+ \"forward\": \"backward\",\n+}\n+\n+\n+def _swap_direction(norm):\n+ try:\n+ return _SWAP_DIRECTION_MAP[norm]\n+ except KeyError:\n+ raise ValueError(\n+ f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n+ ) from None\n+\n+\n @to_ivy_arrays_and_back\n def ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n@@ -67,6 +84,17 @@\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n \n \n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes({\"1.12.0 and below\": (\"float16\",)}, \"numpy\")\n+def ihfft(a, n=None, axis=-1, norm=None):\n+ a = ivy.array(a, dtype=ivy.float64)\n+ if n is None:\n+ n = a.shape[axis]\n+ norm = _swap_direction(norm)\n+ output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n+ return output\n+\n+\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"int\",)}, \"paddle\")\n @to_ivy_arrays_and_back\n def fftfreq(n, d=1.0):\n", "issue": "ihfft\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.9.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int\",)}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be 
an integer\")\n\n N = (n - 1) // 2 + 1\n val = 1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}]}
1,535
451
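The ivy frontend added above computes `ihfft` as a conjugated `rfft` with the norm direction swapped. A quick check of that identity against NumPy itself, which documents `ihfft(a)` as `conj(rfft(a)) / n` for the default norm:

```python
import numpy as np

a = np.random.rand(8)

reference = np.fft.ihfft(a)
via_rfft = np.conj(np.fft.rfft(a, norm="forward"))  # "forward" is the swapped default

assert np.allclose(reference, via_rfft)
```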
gh_patches_debug_25212
rasdani/github-patches
git_diff
vyperlang__vyper-3030
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> can't declare variable `public` but can define custom getter ### Version Information * vyper Version: 0.3.4+commit.f31f0ec * OS: osx * Python Version: 3.8.9 ### What's your issue about? This code fails to compile: ``` slates : public(HashMap[bytes32, DynArray[address, 15]]) ``` but I can define my own getter for that type just fine: ``` slates : HashMap[bytes32, DynArray[address, 15]] @external def slate(sid :bytes32) -> DynArray[address, 15]: return self.slates[sid] ``` </issue> <code> [start of vyper/ast/expansion.py] 1 import copy 2 3 from vyper import ast as vy_ast 4 from vyper.exceptions import CompilerPanic 5 6 7 def expand_annotated_ast(vyper_module: vy_ast.Module) -> None: 8 """ 9 Perform expansion / simplification operations on an annotated Vyper AST. 10 11 This pass uses annotated type information to modify the AST, simplifying 12 logic and expanding subtrees to reduce the compexity during codegen. 13 14 Arguments 15 --------- 16 vyper_module : Module 17 Top-level Vyper AST node that has been type-checked and annotated. 18 """ 19 generate_public_variable_getters(vyper_module) 20 remove_unused_statements(vyper_module) 21 22 23 def generate_public_variable_getters(vyper_module: vy_ast.Module) -> None: 24 """ 25 Create getter functions for public variables. 26 27 Arguments 28 --------- 29 vyper_module : Module 30 Top-level Vyper AST node. 31 """ 32 33 for node in vyper_module.get_children(vy_ast.VariableDecl, {"annotation.func.id": "public"}): 34 func_type = node._metadata["func_type"] 35 input_types, return_type = func_type.get_signature() 36 input_nodes = [] 37 38 # use the annotation node as a base to build the input args and return type 39 # starting with `args[0]` to remove the surrounding `public()` call` 40 annotation = copy.copy(node.annotation.args[0]) 41 42 # the base return statement is an `Attribute` node, e.g. 
`self.<var_name>` 43 # for each input type we wrap it in a `Subscript` to access a specific member 44 return_stmt: vy_ast.VyperNode = vy_ast.Attribute( 45 value=vy_ast.Name(id="self"), attr=func_type.name 46 ) 47 return_stmt._metadata["type"] = node._metadata["type"] 48 49 for i, type_ in enumerate(input_types): 50 if not isinstance(annotation, vy_ast.Subscript): 51 # if we get here something has failed in type checking 52 raise CompilerPanic("Mismatch between node and input type while building getter") 53 if annotation.value.get("id") == "HashMap": # type: ignore 54 # for a HashMap, split the key/value types and use the key type as the next arg 55 arg, annotation = annotation.slice.value.elements # type: ignore 56 else: 57 # for other types, build an input arg node from the expected type 58 # and remove the outer `Subscript` from the annotation 59 arg = vy_ast.Name(id=type_._id) 60 annotation = annotation.value 61 input_nodes.append(vy_ast.arg(arg=f"arg{i}", annotation=arg)) 62 63 # wrap the return statement in a `Subscript` 64 return_stmt = vy_ast.Subscript( 65 value=return_stmt, slice=vy_ast.Index(value=vy_ast.Name(id=f"arg{i}")) 66 ) 67 68 # after iterating the input types, the remaining annotation node is our return type 69 return_node = annotation 70 71 # join everything together as a new `FunctionDef` node, annotate it 72 # with the type, and append it to the existing `Module` node 73 expanded = vy_ast.FunctionDef.from_node( 74 node.annotation, 75 name=func_type.name, 76 args=vy_ast.arguments(args=input_nodes, defaults=[]), 77 body=[vy_ast.Return(value=return_stmt)], 78 decorator_list=[vy_ast.Name(id="external"), vy_ast.Name(id="view")], 79 returns=return_node, 80 ) 81 expanded._metadata["type"] = func_type 82 return_node.set_parent(expanded) 83 vyper_module.add_to_body(expanded) 84 85 86 def remove_unused_statements(vyper_module: vy_ast.Module) -> None: 87 """ 88 Remove statement nodes that are unused after type checking. 89 90 Once type checking is complete, we can remove now-meaningless statements to 91 simplify the AST prior to IR generation. 92 93 Arguments 94 --------- 95 vyper_module : Module 96 Top-level Vyper AST node. 97 """ 98 99 # constant declarations - values were substituted within the AST during folding 100 for node in vyper_module.get_children(vy_ast.VariableDecl, {"annotation.func.id": "constant"}): 101 vyper_module.remove_from_body(node) 102 103 # `implements: interface` statements - validated during type checking 104 for node in vyper_module.get_children(vy_ast.AnnAssign, {"target.id": "implements"}): 105 vyper_module.remove_from_body(node) 106 [end of vyper/ast/expansion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vyper/ast/expansion.py b/vyper/ast/expansion.py --- a/vyper/ast/expansion.py +++ b/vyper/ast/expansion.py @@ -53,6 +53,9 @@ if annotation.value.get("id") == "HashMap": # type: ignore # for a HashMap, split the key/value types and use the key type as the next arg arg, annotation = annotation.slice.value.elements # type: ignore + elif annotation.value.get("id") == "DynArray": + arg = vy_ast.Name(id=type_._id) + annotation = annotation.slice.value.elements[0] # type: ignore else: # for other types, build an input arg node from the expected type # and remove the outer `Subscript` from the annotation @@ -66,7 +69,7 @@ ) # after iterating the input types, the remaining annotation node is our return type - return_node = annotation + return_node = copy.copy(annotation) # join everything together as a new `FunctionDef` node, annotate it # with the type, and append it to the existing `Module` node
{"golden_diff": "diff --git a/vyper/ast/expansion.py b/vyper/ast/expansion.py\n--- a/vyper/ast/expansion.py\n+++ b/vyper/ast/expansion.py\n@@ -53,6 +53,9 @@\n if annotation.value.get(\"id\") == \"HashMap\": # type: ignore\n # for a HashMap, split the key/value types and use the key type as the next arg\n arg, annotation = annotation.slice.value.elements # type: ignore\n+ elif annotation.value.get(\"id\") == \"DynArray\":\n+ arg = vy_ast.Name(id=type_._id)\n+ annotation = annotation.slice.value.elements[0] # type: ignore\n else:\n # for other types, build an input arg node from the expected type\n # and remove the outer `Subscript` from the annotation\n@@ -66,7 +69,7 @@\n )\n \n # after iterating the input types, the remaining annotation node is our return type\n- return_node = annotation\n+ return_node = copy.copy(annotation)\n \n # join everything together as a new `FunctionDef` node, annotate it\n # with the type, and append it to the existing `Module` node\n", "issue": "can't declare variable `public` but can define custom getter\n### Version Information\r\n\r\n* vyper Version: 0.3.4+commit.f31f0ec\r\n* OS: osx\r\n* Python Version: 3.8.9\r\n\r\n### What's your issue about?\r\n\r\nThis code fails to compile:\r\n\r\n```\r\nslates : public(HashMap[bytes32, DynArray[address, 15]])\r\n```\r\n\r\nbut I can define my own getter for that type just fine:\r\n\r\n```\r\nslates : HashMap[bytes32, DynArray[address, 15]]\r\n\r\n@external\r\ndef slate(sid :bytes32) -> DynArray[address, 15]:\r\n return self.slates[sid]\r\n```\r\n\n", "before_files": [{"content": "import copy\n\nfrom vyper import ast as vy_ast\nfrom vyper.exceptions import CompilerPanic\n\n\ndef expand_annotated_ast(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Perform expansion / simplification operations on an annotated Vyper AST.\n\n This pass uses annotated type information to modify the AST, simplifying\n logic and expanding subtrees to reduce the compexity during codegen.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node that has been type-checked and annotated.\n \"\"\"\n generate_public_variable_getters(vyper_module)\n remove_unused_statements(vyper_module)\n\n\ndef generate_public_variable_getters(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Create getter functions for public variables.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n\n for node in vyper_module.get_children(vy_ast.VariableDecl, {\"annotation.func.id\": \"public\"}):\n func_type = node._metadata[\"func_type\"]\n input_types, return_type = func_type.get_signature()\n input_nodes = []\n\n # use the annotation node as a base to build the input args and return type\n # starting with `args[0]` to remove the surrounding `public()` call`\n annotation = copy.copy(node.annotation.args[0])\n\n # the base return statement is an `Attribute` node, e.g. 
`self.<var_name>`\n # for each input type we wrap it in a `Subscript` to access a specific member\n return_stmt: vy_ast.VyperNode = vy_ast.Attribute(\n value=vy_ast.Name(id=\"self\"), attr=func_type.name\n )\n return_stmt._metadata[\"type\"] = node._metadata[\"type\"]\n\n for i, type_ in enumerate(input_types):\n if not isinstance(annotation, vy_ast.Subscript):\n # if we get here something has failed in type checking\n raise CompilerPanic(\"Mismatch between node and input type while building getter\")\n if annotation.value.get(\"id\") == \"HashMap\": # type: ignore\n # for a HashMap, split the key/value types and use the key type as the next arg\n arg, annotation = annotation.slice.value.elements # type: ignore\n else:\n # for other types, build an input arg node from the expected type\n # and remove the outer `Subscript` from the annotation\n arg = vy_ast.Name(id=type_._id)\n annotation = annotation.value\n input_nodes.append(vy_ast.arg(arg=f\"arg{i}\", annotation=arg))\n\n # wrap the return statement in a `Subscript`\n return_stmt = vy_ast.Subscript(\n value=return_stmt, slice=vy_ast.Index(value=vy_ast.Name(id=f\"arg{i}\"))\n )\n\n # after iterating the input types, the remaining annotation node is our return type\n return_node = annotation\n\n # join everything together as a new `FunctionDef` node, annotate it\n # with the type, and append it to the existing `Module` node\n expanded = vy_ast.FunctionDef.from_node(\n node.annotation,\n name=func_type.name,\n args=vy_ast.arguments(args=input_nodes, defaults=[]),\n body=[vy_ast.Return(value=return_stmt)],\n decorator_list=[vy_ast.Name(id=\"external\"), vy_ast.Name(id=\"view\")],\n returns=return_node,\n )\n expanded._metadata[\"type\"] = func_type\n return_node.set_parent(expanded)\n vyper_module.add_to_body(expanded)\n\n\ndef remove_unused_statements(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Remove statement nodes that are unused after type checking.\n\n Once type checking is complete, we can remove now-meaningless statements to\n simplify the AST prior to IR generation.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n\n # constant declarations - values were substituted within the AST during folding\n for node in vyper_module.get_children(vy_ast.VariableDecl, {\"annotation.func.id\": \"constant\"}):\n vyper_module.remove_from_body(node)\n\n # `implements: interface` statements - validated during type checking\n for node in vyper_module.get_children(vy_ast.AnnAssign, {\"target.id\": \"implements\"}):\n vyper_module.remove_from_body(node)\n", "path": "vyper/ast/expansion.py"}]}
1,856
268
gh_patches_debug_28637
rasdani/github-patches
git_diff
comic__grand-challenge.org-1771
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add profile information to the verification admin When manually reviewing verification requests it would be helpful to have more information in the admin such as the users full name, location, department and website. </issue> <code> [start of app/grandchallenge/verifications/admin.py] 1 from django.contrib import admin 2 from django.utils.timezone import now 3 4 from grandchallenge.verifications.models import Verification 5 6 7 def mark_verified(modeladmin, request, queryset): 8 queryset.update(is_verified=True, verified_at=now()) 9 10 11 mark_verified.short_description = "Mark selected users as verified" 12 mark_verified.allowed_permissions = ("change",) 13 14 15 def mark_not_verified(modeladmin, request, queryset): 16 queryset.update(is_verified=False, verified_at=None) 17 18 19 mark_not_verified.short_description = "Mark selected users as not verified" 20 mark_not_verified.allowed_permissions = ("change",) 21 22 23 class VerificationAdmin(admin.ModelAdmin): 24 list_display = ( 25 "user", 26 "created", 27 "signup_email", 28 "signup_email_activated", 29 "email", 30 "email_is_verified", 31 "is_verified", 32 "verified_at", 33 ) 34 list_filter = ("email_is_verified", "is_verified") 35 readonly_fields = ( 36 "created", 37 "modified", 38 "email_is_verified", 39 "email_verified_at", 40 "is_verified", 41 "verified_at", 42 ) 43 search_fields = ("user__username", "email", "user__email") 44 actions = (mark_verified, mark_not_verified) 45 autocomplete_fields = ("user",) 46 47 def signup_email_activated(self, instance): 48 return instance.signup_email_activated 49 50 signup_email_activated.boolean = True 51 52 def get_readonly_fields(self, request, obj=None): 53 if obj: 54 return ("user", "email", *self.readonly_fields) 55 else: 56 return self.readonly_fields 57 58 59 admin.site.register(Verification, VerificationAdmin) 60 [end of app/grandchallenge/verifications/admin.py] [start of app/grandchallenge/verifications/models.py] 1 from allauth.account.signals import email_confirmed 2 from django.contrib.auth import get_user_model 3 from django.db import models 4 from django.utils.timezone import now 5 from pyswot import is_academic 6 7 from grandchallenge.subdomains.utils import reverse 8 from grandchallenge.verifications.tokens import ( 9 email_verification_token_generator, 10 ) 11 12 13 def email_is_trusted(*, email): 14 return is_academic(email) 15 16 17 class Verification(models.Model): 18 created = models.DateTimeField(auto_now_add=True) 19 modified = models.DateTimeField(auto_now=True) 20 21 user = models.OneToOneField( 22 get_user_model(), unique=True, on_delete=models.CASCADE, 23 ) 24 25 email = models.EmailField(blank=True) 26 email_is_verified = models.BooleanField(default=False, editable=False) 27 email_verified_at = models.DateTimeField( 28 blank=True, null=True, editable=False 29 ) 30 31 is_verified = models.BooleanField(default=None, null=True, editable=False) 32 verified_at = models.DateTimeField(blank=True, null=True, editable=False) 33 34 def __str__(self): 35 return f"Verification for {self.user}" 36 37 @property 38 def signup_email(self): 39 return self.user.email 40 41 @property 42 def signup_email_activated(self): 43 return self.user.emailaddress_set.filter( 44 verified=True, email=self.signup_email 45 ).exists() 46 47 @property 48 def signup_email_is_trusted(self): 49 return self.signup_email_activated and email_is_trusted( 50 email=self.signup_email 51 ) 52 53 @property 54 
def verification_email_is_trusted(self): 55 return self.email_is_verified and email_is_trusted(email=self.email) 56 57 @property 58 def token(self): 59 return email_verification_token_generator.make_token(self.user) 60 61 @property 62 def verification_url(self): 63 return reverse("verifications:confirm", kwargs={"token": self.token},) 64 65 def save(self, *args, **kwargs): 66 if self.signup_email_is_trusted or self.verification_email_is_trusted: 67 self.is_verified = True 68 self.verified_at = now() 69 70 super().save(*args, **kwargs) 71 72 73 def create_verification(email_address, *_, **__): 74 if ( 75 email_is_trusted(email=email_address.email) 76 and not Verification.objects.filter(user=email_address.user).exists() 77 ): 78 Verification.objects.create( 79 user=email_address.user, email=email_address.email 80 ) 81 82 83 email_confirmed.connect(create_verification) 84 [end of app/grandchallenge/verifications/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/verifications/admin.py b/app/grandchallenge/verifications/admin.py --- a/app/grandchallenge/verifications/admin.py +++ b/app/grandchallenge/verifications/admin.py @@ -23,14 +23,15 @@ class VerificationAdmin(admin.ModelAdmin): list_display = ( "user", + "user_info", "created", "signup_email", - "signup_email_activated", "email", "email_is_verified", "is_verified", "verified_at", ) + list_select_related = ("user__user_profile",) list_filter = ("email_is_verified", "is_verified") readonly_fields = ( "created", diff --git a/app/grandchallenge/verifications/models.py b/app/grandchallenge/verifications/models.py --- a/app/grandchallenge/verifications/models.py +++ b/app/grandchallenge/verifications/models.py @@ -1,6 +1,7 @@ from allauth.account.signals import email_confirmed from django.contrib.auth import get_user_model from django.db import models +from django.utils.html import format_html from django.utils.timezone import now from pyswot import is_academic @@ -69,6 +70,17 @@ super().save(*args, **kwargs) + @property + def user_info(self): + return format_html( + "<span>{} <br/> {} <br/> {} <br/> {} <br/> {}</span>", + self.user.get_full_name(), + self.user.user_profile.institution, + self.user.user_profile.department, + self.user.user_profile.country, + self.user.user_profile.website, + ) + def create_verification(email_address, *_, **__): if (
{"golden_diff": "diff --git a/app/grandchallenge/verifications/admin.py b/app/grandchallenge/verifications/admin.py\n--- a/app/grandchallenge/verifications/admin.py\n+++ b/app/grandchallenge/verifications/admin.py\n@@ -23,14 +23,15 @@\n class VerificationAdmin(admin.ModelAdmin):\n list_display = (\n \"user\",\n+ \"user_info\",\n \"created\",\n \"signup_email\",\n- \"signup_email_activated\",\n \"email\",\n \"email_is_verified\",\n \"is_verified\",\n \"verified_at\",\n )\n+ list_select_related = (\"user__user_profile\",)\n list_filter = (\"email_is_verified\", \"is_verified\")\n readonly_fields = (\n \"created\",\ndiff --git a/app/grandchallenge/verifications/models.py b/app/grandchallenge/verifications/models.py\n--- a/app/grandchallenge/verifications/models.py\n+++ b/app/grandchallenge/verifications/models.py\n@@ -1,6 +1,7 @@\n from allauth.account.signals import email_confirmed\n from django.contrib.auth import get_user_model\n from django.db import models\n+from django.utils.html import format_html\n from django.utils.timezone import now\n from pyswot import is_academic\n \n@@ -69,6 +70,17 @@\n \n super().save(*args, **kwargs)\n \n+ @property\n+ def user_info(self):\n+ return format_html(\n+ \"<span>{} <br/> {} <br/> {} <br/> {} <br/> {}</span>\",\n+ self.user.get_full_name(),\n+ self.user.user_profile.institution,\n+ self.user.user_profile.department,\n+ self.user.user_profile.country,\n+ self.user.user_profile.website,\n+ )\n+\n \n def create_verification(email_address, *_, **__):\n if (\n", "issue": "Add profile information to the verification admin\nWhen manually reviewing verification requests it would be helpful to have more information in the admin such as the users full name, location, department and website.\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.utils.timezone import now\n\nfrom grandchallenge.verifications.models import Verification\n\n\ndef mark_verified(modeladmin, request, queryset):\n queryset.update(is_verified=True, verified_at=now())\n\n\nmark_verified.short_description = \"Mark selected users as verified\"\nmark_verified.allowed_permissions = (\"change\",)\n\n\ndef mark_not_verified(modeladmin, request, queryset):\n queryset.update(is_verified=False, verified_at=None)\n\n\nmark_not_verified.short_description = \"Mark selected users as not verified\"\nmark_not_verified.allowed_permissions = (\"change\",)\n\n\nclass VerificationAdmin(admin.ModelAdmin):\n list_display = (\n \"user\",\n \"created\",\n \"signup_email\",\n \"signup_email_activated\",\n \"email\",\n \"email_is_verified\",\n \"is_verified\",\n \"verified_at\",\n )\n list_filter = (\"email_is_verified\", \"is_verified\")\n readonly_fields = (\n \"created\",\n \"modified\",\n \"email_is_verified\",\n \"email_verified_at\",\n \"is_verified\",\n \"verified_at\",\n )\n search_fields = (\"user__username\", \"email\", \"user__email\")\n actions = (mark_verified, mark_not_verified)\n autocomplete_fields = (\"user\",)\n\n def signup_email_activated(self, instance):\n return instance.signup_email_activated\n\n signup_email_activated.boolean = True\n\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return (\"user\", \"email\", *self.readonly_fields)\n else:\n return self.readonly_fields\n\n\nadmin.site.register(Verification, VerificationAdmin)\n", "path": "app/grandchallenge/verifications/admin.py"}, {"content": "from allauth.account.signals import email_confirmed\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom 
django.utils.timezone import now\nfrom pyswot import is_academic\n\nfrom grandchallenge.subdomains.utils import reverse\nfrom grandchallenge.verifications.tokens import (\n email_verification_token_generator,\n)\n\n\ndef email_is_trusted(*, email):\n return is_academic(email)\n\n\nclass Verification(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n user = models.OneToOneField(\n get_user_model(), unique=True, on_delete=models.CASCADE,\n )\n\n email = models.EmailField(blank=True)\n email_is_verified = models.BooleanField(default=False, editable=False)\n email_verified_at = models.DateTimeField(\n blank=True, null=True, editable=False\n )\n\n is_verified = models.BooleanField(default=None, null=True, editable=False)\n verified_at = models.DateTimeField(blank=True, null=True, editable=False)\n\n def __str__(self):\n return f\"Verification for {self.user}\"\n\n @property\n def signup_email(self):\n return self.user.email\n\n @property\n def signup_email_activated(self):\n return self.user.emailaddress_set.filter(\n verified=True, email=self.signup_email\n ).exists()\n\n @property\n def signup_email_is_trusted(self):\n return self.signup_email_activated and email_is_trusted(\n email=self.signup_email\n )\n\n @property\n def verification_email_is_trusted(self):\n return self.email_is_verified and email_is_trusted(email=self.email)\n\n @property\n def token(self):\n return email_verification_token_generator.make_token(self.user)\n\n @property\n def verification_url(self):\n return reverse(\"verifications:confirm\", kwargs={\"token\": self.token},)\n\n def save(self, *args, **kwargs):\n if self.signup_email_is_trusted or self.verification_email_is_trusted:\n self.is_verified = True\n self.verified_at = now()\n\n super().save(*args, **kwargs)\n\n\ndef create_verification(email_address, *_, **__):\n if (\n email_is_trusted(email=email_address.email)\n and not Verification.objects.filter(user=email_address.user).exists()\n ):\n Verification.objects.create(\n user=email_address.user, email=email_address.email\n )\n\n\nemail_confirmed.connect(create_verification)\n", "path": "app/grandchallenge/verifications/models.py"}]}
1,732
394
gh_patches_debug_37750
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-2859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FunnyOrDie extractor not finding thumbnails python -m youtube_dl -v --skip-download --write-info-json --no-playlist -f mp4 http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie extracts the video properly but not a thumbnail. Here's the resulting JSON: {"display_id": "e402820827", "extractor": "FunnyOrDie", "description": "Please use this to sell something", "format": "0 - unknown", "format_id": "0", "playlist_index": null, "stitle": "Please Use This Song (Jon Lajoie)", "playlist": null, "title": "Please Use This Song (Jon Lajoie)", "url": "http://vo.fod4.com/v/e402820827/v600.mp4", "extractor_key": "FunnyOrDie", "id": "e402820827", "ext": "mp4", "webpage_url": "http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie", "fulltitle": "Please Use This Song (Jon Lajoie)", "thumbnail": null, "webpage_url_basename": "please-use-this-song-jon-lajoie"} FunnyorDie's RSS feed entry for this page does contain a thumbnail: media:thumbnail url="http://t.fod4.com/t/e402820827/c480x270_50.jpg" width="464" height="348" </issue> <code> [start of youtube_dl/extractor/funnyordie.py] 1 from __future__ import unicode_literals 2 3 import json 4 import re 5 6 from .common import InfoExtractor 7 8 9 class FunnyOrDieIE(InfoExtractor): 10 _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])' 11 _TEST = { 12 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', 13 'file': '0732f586d7.mp4', 14 'md5': 'f647e9e90064b53b6e046e75d0241fbd', 15 'info_dict': { 16 'description': ('Lyrics changed to match the video. Spoken cameo ' 17 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a ' 18 'concept by Dustin McLean (DustFilms.com). Performed, edited, ' 19 'and written by David A. Scott.'), 20 'title': 'Heart-Shaped Box: Literal Video Version', 21 }, 22 } 23 24 def _real_extract(self, url): 25 mobj = re.match(self._VALID_URL, url) 26 27 video_id = mobj.group('id') 28 webpage = self._download_webpage(url, video_id) 29 30 video_url = self._search_regex( 31 [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], 32 webpage, 'video URL', flags=re.DOTALL) 33 34 if mobj.group('type') == 'embed': 35 post_json = self._search_regex( 36 r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') 37 post = json.loads(post_json) 38 title = post['name'] 39 description = post.get('description') 40 thumbnail = post.get('picture') 41 else: 42 title = self._og_search_title(webpage) 43 description = self._og_search_description(webpage) 44 thumbnail = None 45 46 return { 47 'id': video_id, 48 'url': video_url, 49 'ext': 'mp4', 50 'title': title, 51 'description': description, 52 'thumbnail': thumbnail, 53 } 54 [end of youtube_dl/extractor/funnyordie.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -8,18 +8,27 @@ class FunnyOrDieIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])' - _TEST = { + _TESTS = [{ 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', - 'file': '0732f586d7.mp4', 'md5': 'f647e9e90064b53b6e046e75d0241fbd', 'info_dict': { - 'description': ('Lyrics changed to match the video. Spoken cameo ' - 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a ' - 'concept by Dustin McLean (DustFilms.com). Performed, edited, ' - 'and written by David A. Scott.'), + 'id': '0732f586d7', + 'ext': 'mp4', 'title': 'Heart-Shaped Box: Literal Video Version', + 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338', + 'thumbnail': 're:^http:.*\.jpg$', + }, + }, { + 'url': 'http://www.funnyordie.com/embed/e402820827', + 'md5': '0e0c5a7bf45c52b95cd16aa7f28be0b6', + 'info_dict': { + 'id': 'e402820827', + 'ext': 'mp4', + 'title': 'Please Use This Song (Jon Lajoie)', + 'description': 'md5:2ed27d364f5a805a6dba199faaf6681d', + 'thumbnail': 're:^http:.*\.jpg$', }, - } + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -31,23 +40,15 @@ [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], webpage, 'video URL', flags=re.DOTALL) - if mobj.group('type') == 'embed': - post_json = self._search_regex( - r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') - post = json.loads(post_json) - title = post['name'] - description = post.get('description') - thumbnail = post.get('picture') - else: - title = self._og_search_title(webpage) - description = self._og_search_description(webpage) - thumbnail = None + post_json = self._search_regex( + r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') + post = json.loads(post_json) return { 'id': video_id, 'url': video_url, 'ext': 'mp4', - 'title': title, - 'description': description, - 'thumbnail': thumbnail, + 'title': post['name'], + 'description': post.get('description'), + 'thumbnail': post.get('picture'), }
{"golden_diff": "diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py\n--- a/youtube_dl/extractor/funnyordie.py\n+++ b/youtube_dl/extractor/funnyordie.py\n@@ -8,18 +8,27 @@\n \n class FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n- _TEST = {\n+ _TESTS = [{\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n- 'file': '0732f586d7.mp4',\n 'md5': 'f647e9e90064b53b6e046e75d0241fbd',\n 'info_dict': {\n- 'description': ('Lyrics changed to match the video. Spoken cameo '\n- 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '\n- 'concept by Dustin McLean (DustFilms.com). Performed, edited, '\n- 'and written by David A. Scott.'),\n+ 'id': '0732f586d7',\n+ 'ext': 'mp4',\n 'title': 'Heart-Shaped Box: Literal Video Version',\n+ 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',\n+ 'thumbnail': 're:^http:.*\\.jpg$',\n+ },\n+ }, {\n+ 'url': 'http://www.funnyordie.com/embed/e402820827',\n+ 'md5': '0e0c5a7bf45c52b95cd16aa7f28be0b6',\n+ 'info_dict': {\n+ 'id': 'e402820827',\n+ 'ext': 'mp4',\n+ 'title': 'Please Use This Song (Jon Lajoie)',\n+ 'description': 'md5:2ed27d364f5a805a6dba199faaf6681d',\n+ 'thumbnail': 're:^http:.*\\.jpg$',\n },\n- }\n+ }]\n \n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n@@ -31,23 +40,15 @@\n [r'type=\"video/mp4\" src=\"(.*?)\"', r'src=\"([^>]*?)\" type=\\'video/mp4\\''],\n webpage, 'video URL', flags=re.DOTALL)\n \n- if mobj.group('type') == 'embed':\n- post_json = self._search_regex(\n- r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n- post = json.loads(post_json)\n- title = post['name']\n- description = post.get('description')\n- thumbnail = post.get('picture')\n- else:\n- title = self._og_search_title(webpage)\n- description = self._og_search_description(webpage)\n- thumbnail = None\n+ post_json = self._search_regex(\n+ r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n+ post = json.loads(post_json)\n \n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n- 'title': title,\n- 'description': description,\n- 'thumbnail': thumbnail,\n+ 'title': post['name'],\n+ 'description': post.get('description'),\n+ 'thumbnail': post.get('picture'),\n }\n", "issue": "FunnyOrDie extractor not finding thumbnails\npython -m youtube_dl -v --skip-download --write-info-json --no-playlist -f mp4 http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie extracts the video properly but not a thumbnail. 
Here's the resulting JSON:\n{\"display_id\": \"e402820827\", \"extractor\": \"FunnyOrDie\", \"description\": \"Please use this to sell something\", \"format\": \"0 - unknown\", \"format_id\": \"0\", \"playlist_index\": null, \"stitle\": \"Please Use This Song (Jon Lajoie)\", \"playlist\": null, \"title\": \"Please Use This Song (Jon Lajoie)\", \"url\": \"http://vo.fod4.com/v/e402820827/v600.mp4\", \"extractor_key\": \"FunnyOrDie\", \"id\": \"e402820827\", \"ext\": \"mp4\", \"webpage_url\": \"http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie\", \"fulltitle\": \"Please Use This Song (Jon Lajoie)\", \"thumbnail\": null, \"webpage_url_basename\": \"please-use-this-song-jon-lajoie\"}\n\nFunnyorDie's RSS feed entry for this page does contain a thumbnail:\nmedia:thumbnail url=\"http://t.fod4.com/t/e402820827/c480x270_50.jpg\" width=\"464\" height=\"348\"\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\n\n\nclass FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n _TEST = {\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n 'file': '0732f586d7.mp4',\n 'md5': 'f647e9e90064b53b6e046e75d0241fbd',\n 'info_dict': {\n 'description': ('Lyrics changed to match the video. Spoken cameo '\n 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '\n 'concept by Dustin McLean (DustFilms.com). Performed, edited, '\n 'and written by David A. Scott.'),\n 'title': 'Heart-Shaped Box: Literal Video Version',\n },\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n\n video_url = self._search_regex(\n [r'type=\"video/mp4\" src=\"(.*?)\"', r'src=\"([^>]*?)\" type=\\'video/mp4\\''],\n webpage, 'video URL', flags=re.DOTALL)\n\n if mobj.group('type') == 'embed':\n post_json = self._search_regex(\n r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n post = json.loads(post_json)\n title = post['name']\n description = post.get('description')\n thumbnail = post.get('picture')\n else:\n title = self._og_search_title(webpage)\n description = self._og_search_description(webpage)\n thumbnail = None\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n 'title': title,\n 'description': description,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/funnyordie.py"}]}
1,530
900
gh_patches_debug_37748
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1123
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for OTEL_PROPAGATORS The spec describes environment variables that should be supported to configure propagators, this feature request is to add support in the current implementation. https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md </issue> <code> [start of opentelemetry-api/src/opentelemetry/propagators/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 API for propagation of context. 17 18 Example:: 19 20 import flask 21 import requests 22 from opentelemetry import propagators 23 24 25 PROPAGATOR = propagators.get_global_textmap() 26 27 28 def get_header_from_flask_request(request, key): 29 return request.headers.get_all(key) 30 31 def set_header_into_requests_request(request: requests.Request, 32 key: str, value: str): 33 request.headers[key] = value 34 35 def example_route(): 36 context = PROPAGATOR.extract( 37 get_header_from_flask_request, 38 flask.request 39 ) 40 request_to_downstream = requests.Request( 41 "GET", "http://httpbin.org/get" 42 ) 43 PROPAGATOR.inject( 44 set_header_into_requests_request, 45 request_to_downstream, 46 context=context 47 ) 48 session = requests.Session() 49 session.send(request_to_downstream.prepare()) 50 51 52 .. _Propagation API Specification: 53 https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-propagators.md 54 """ 55 56 import typing 57 58 from opentelemetry.baggage.propagation import BaggagePropagator 59 from opentelemetry.context.context import Context 60 from opentelemetry.propagators import composite 61 from opentelemetry.trace.propagation import textmap 62 from opentelemetry.trace.propagation.tracecontext import ( 63 TraceContextTextMapPropagator, 64 ) 65 66 67 def extract( 68 get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT], 69 carrier: textmap.TextMapPropagatorT, 70 context: typing.Optional[Context] = None, 71 ) -> Context: 72 """ Uses the configured propagator to extract a Context from the carrier. 73 74 Args: 75 get_from_carrier: a function that can retrieve zero 76 or more values from the carrier. In the case that 77 the value does not exist, return an empty list. 78 carrier: and object which contains values that are 79 used to construct a Context. This object 80 must be paired with an appropriate get_from_carrier 81 which understands how to extract a value from it. 82 context: an optional Context to use. Defaults to current 83 context if not set. 84 """ 85 return get_global_textmap().extract(get_from_carrier, carrier, context) 86 87 88 def inject( 89 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT], 90 carrier: textmap.TextMapPropagatorT, 91 context: typing.Optional[Context] = None, 92 ) -> None: 93 """ Uses the configured propagator to inject a Context into the carrier. 
94 95 Args: 96 set_in_carrier: A setter function that can set values 97 on the carrier. 98 carrier: An object that contains a representation of HTTP 99 headers. Should be paired with set_in_carrier, which 100 should know how to set header values on the carrier. 101 context: an optional Context to use. Defaults to current 102 context if not set. 103 """ 104 get_global_textmap().inject(set_in_carrier, carrier, context) 105 106 107 _HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator( 108 [TraceContextTextMapPropagator(), BaggagePropagator()], 109 ) # type: textmap.TextMapPropagator 110 111 112 def get_global_textmap() -> textmap.TextMapPropagator: 113 return _HTTP_TEXT_FORMAT 114 115 116 def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None: 117 global _HTTP_TEXT_FORMAT # pylint:disable=global-statement 118 _HTTP_TEXT_FORMAT = http_text_format 119 [end of opentelemetry-api/src/opentelemetry/propagators/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py --- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py +++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py @@ -15,6 +15,21 @@ """ API for propagation of context. +The propagators for the +``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined +via configuration in the ``OTEL_PROPAGATORS`` environment variable. This +variable should be set to a comma-separated string of names of values for the +``opentelemetry_propagator`` entry point. For example, setting +``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value) +would instantiate +``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2 +propagators, one of type +``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator`` +and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``. +Notice that these propagator classes are defined as +``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of +``opentelemetry``. + Example:: import flask @@ -54,14 +69,16 @@ """ import typing +from logging import getLogger + +from pkg_resources import iter_entry_points -from opentelemetry.baggage.propagation import BaggagePropagator +from opentelemetry.configuration import Configuration from opentelemetry.context.context import Context from opentelemetry.propagators import composite from opentelemetry.trace.propagation import textmap -from opentelemetry.trace.propagation.tracecontext import ( - TraceContextTextMapPropagator, -) + +logger = getLogger(__name__) def extract( @@ -104,9 +121,25 @@ get_global_textmap().inject(set_in_carrier, carrier, context) -_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator( - [TraceContextTextMapPropagator(), BaggagePropagator()], -) # type: textmap.TextMapPropagator +try: + + propagators = [] + + for propagator in ( # type: ignore + Configuration().get("PROPAGATORS", "tracecontext,baggage").split(",") # type: ignore + ): + + propagators.append( # type: ignore + next( # type: ignore + iter_entry_points("opentelemetry_propagator", propagator) # type: ignore + ).load()() + ) + +except Exception: # pylint: disable=broad-except + logger.exception("Failed to load configured propagators") + raise + +_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore def get_global_textmap() -> textmap.TextMapPropagator: @@ -115,4 +148,4 @@ def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None: global _HTTP_TEXT_FORMAT # pylint:disable=global-statement - _HTTP_TEXT_FORMAT = http_text_format + _HTTP_TEXT_FORMAT = http_text_format # type: ignore
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n@@ -15,6 +15,21 @@\n \"\"\"\n API for propagation of context.\n \n+The propagators for the\n+``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined\n+via configuration in the ``OTEL_PROPAGATORS`` environment variable. This\n+variable should be set to a comma-separated string of names of values for the\n+``opentelemetry_propagator`` entry point. For example, setting\n+``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)\n+would instantiate\n+``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2\n+propagators, one of type\n+``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``\n+and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.\n+Notice that these propagator classes are defined as\n+``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of\n+``opentelemetry``.\n+\n Example::\n \n import flask\n@@ -54,14 +69,16 @@\n \"\"\"\n \n import typing\n+from logging import getLogger\n+\n+from pkg_resources import iter_entry_points\n \n-from opentelemetry.baggage.propagation import BaggagePropagator\n+from opentelemetry.configuration import Configuration\n from opentelemetry.context.context import Context\n from opentelemetry.propagators import composite\n from opentelemetry.trace.propagation import textmap\n-from opentelemetry.trace.propagation.tracecontext import (\n- TraceContextTextMapPropagator,\n-)\n+\n+logger = getLogger(__name__)\n \n \n def extract(\n@@ -104,9 +121,25 @@\n get_global_textmap().inject(set_in_carrier, carrier, context)\n \n \n-_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(\n- [TraceContextTextMapPropagator(), BaggagePropagator()],\n-) # type: textmap.TextMapPropagator\n+try:\n+\n+ propagators = []\n+\n+ for propagator in ( # type: ignore\n+ Configuration().get(\"PROPAGATORS\", \"tracecontext,baggage\").split(\",\") # type: ignore\n+ ):\n+\n+ propagators.append( # type: ignore\n+ next( # type: ignore\n+ iter_entry_points(\"opentelemetry_propagator\", propagator) # type: ignore\n+ ).load()()\n+ )\n+\n+except Exception: # pylint: disable=broad-except\n+ logger.exception(\"Failed to load configured propagators\")\n+ raise\n+\n+_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore\n \n \n def get_global_textmap() -> textmap.TextMapPropagator:\n@@ -115,4 +148,4 @@\n \n def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n- _HTTP_TEXT_FORMAT = http_text_format\n+ _HTTP_TEXT_FORMAT = http_text_format # type: ignore\n", "issue": "Add support for OTEL_PROPAGATORS\nThe spec describes environment variables that should be supported to configure propagators, this feature request is to add support in the current implementation.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAPI for propagation of context.\n\nExample::\n\n import flask\n import requests\n from opentelemetry import propagators\n\n\n PROPAGATOR = propagators.get_global_textmap()\n\n\n def get_header_from_flask_request(request, key):\n return request.headers.get_all(key)\n\n def set_header_into_requests_request(request: requests.Request,\n key: str, value: str):\n request.headers[key] = value\n\n def example_route():\n context = PROPAGATOR.extract(\n get_header_from_flask_request,\n flask.request\n )\n request_to_downstream = requests.Request(\n \"GET\", \"http://httpbin.org/get\"\n )\n PROPAGATOR.inject(\n set_header_into_requests_request,\n request_to_downstream,\n context=context\n )\n session = requests.Session()\n session.send(request_to_downstream.prepare())\n\n\n.. _Propagation API Specification:\n https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-propagators.md\n\"\"\"\n\nimport typing\n\nfrom opentelemetry.baggage.propagation import BaggagePropagator\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import composite\nfrom opentelemetry.trace.propagation import textmap\nfrom opentelemetry.trace.propagation.tracecontext import (\n TraceContextTextMapPropagator,\n)\n\n\ndef extract(\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> Context:\n \"\"\" Uses the configured propagator to extract a Context from the carrier.\n\n Args:\n get_from_carrier: a function that can retrieve zero\n or more values from the carrier. In the case that\n the value does not exist, return an empty list.\n carrier: and object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate get_from_carrier\n which understands how to extract a value from it.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n return get_global_textmap().extract(get_from_carrier, carrier, context)\n\n\ndef inject(\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> None:\n \"\"\" Uses the configured propagator to inject a Context into the carrier.\n\n Args:\n set_in_carrier: A setter function that can set values\n on the carrier.\n carrier: An object that contains a representation of HTTP\n headers. Should be paired with set_in_carrier, which\n should know how to set header values on the carrier.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n get_global_textmap().inject(set_in_carrier, carrier, context)\n\n\n_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(\n [TraceContextTextMapPropagator(), BaggagePropagator()],\n) # type: textmap.TextMapPropagator\n\n\ndef get_global_textmap() -> textmap.TextMapPropagator:\n return _HTTP_TEXT_FORMAT\n\n\ndef set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n _HTTP_TEXT_FORMAT = http_text_format\n", "path": "opentelemetry-api/src/opentelemetry/propagators/__init__.py"}]}
1,750
774
gh_patches_debug_28935
rasdani/github-patches
git_diff
pyload__pyload-1659
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> premiumize.me hook is broken account says username and password is ok but the log always shows: 4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry so i guess the hook is broken premiumize.me hook is broken account says username and password is ok but the log always shows: 4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry so i guess the hook is broken </issue> <code> [start of module/plugins/hoster/PremiumizeMe.py] 1 # -*- coding: utf-8 -*- 2 3 from module.common.json_layer import json_loads 4 from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo 5 6 7 class PremiumizeMe(MultiHoster): 8 __name__ = "PremiumizeMe" 9 __type__ = "hoster" 10 __version__ = "0.19" 11 __status__ = "testing" 12 13 __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate 14 __config__ = [("use_premium" , "bool", "Use premium account if available" , True), 15 ("revertfailed", "bool", "Revert to standard download if fails", True)] 16 17 __description__ = """Premiumize.me multi-hoster plugin""" 18 __license__ = "GPLv3" 19 __authors__ = [("Florian Franzen", "[email protected]")] 20 21 22 def handle_premium(self, pyfile): 23 #: In some cases hostsers do not supply us with a filename at download, so we 24 #: Are going to set a fall back filename (e.g. 
for freakshare or xfileshare) 25 pyfile.name = pyfile.name.split('/').pop() #: Remove everthing before last slash 26 27 #: Correction for automatic assigned filename: Removing html at end if needed 28 suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"] 29 temp = pyfile.name.split('.') 30 if temp.pop() in suffix_to_remove: 31 pyfile.name = ".".join(temp) 32 33 #: Get account data 34 user, data = self.account.select() 35 36 #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api) 37 data = json_loads(self.load("http://api.premiumize.me/pm-api/v1.php", #@TODO: Revert to `https` in 0.4.10 38 get={'method' : "directdownloadlink", 39 'params[login]': user, 40 'params[pass]' : data['password'], 41 'params[link]' : pyfile.url})) 42 43 #: Check status and decide what to do 44 status = data['status'] 45 46 if status == 200: 47 self.link = data['result']['location'] 48 return 49 50 elif status == 400: 51 self.fail(_("Invalid link")) 52 53 elif status == 404: 54 self.offline() 55 56 elif status >= 500: 57 self.temp_offline() 58 59 else: 60 self.fail(data['statusmessage']) 61 62 63 getInfo = create_getInfo(PremiumizeMe) 64 [end of module/plugins/hoster/PremiumizeMe.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/module/plugins/hoster/PremiumizeMe.py b/module/plugins/hoster/PremiumizeMe.py --- a/module/plugins/hoster/PremiumizeMe.py +++ b/module/plugins/hoster/PremiumizeMe.py @@ -7,7 +7,7 @@ class PremiumizeMe(MultiHoster): __name__ = "PremiumizeMe" __type__ = "hoster" - __version__ = "0.19" + __version__ = "0.20" __status__ = "testing" __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate @@ -31,13 +31,13 @@ pyfile.name = ".".join(temp) #: Get account data - user, data = self.account.select() + user, info = self.account.select() #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api) data = json_loads(self.load("http://api.premiumize.me/pm-api/v1.php", #@TODO: Revert to `https` in 0.4.10 get={'method' : "directdownloadlink", 'params[login]': user, - 'params[pass]' : data['password'], + 'params[pass]' : info['login']['password'], 'params[link]' : pyfile.url})) #: Check status and decide what to do
{"golden_diff": "diff --git a/module/plugins/hoster/PremiumizeMe.py b/module/plugins/hoster/PremiumizeMe.py\n--- a/module/plugins/hoster/PremiumizeMe.py\n+++ b/module/plugins/hoster/PremiumizeMe.py\n@@ -7,7 +7,7 @@\n class PremiumizeMe(MultiHoster):\n __name__ = \"PremiumizeMe\"\n __type__ = \"hoster\"\n- __version__ = \"0.19\"\n+ __version__ = \"0.20\"\n __status__ = \"testing\"\n \n __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate\n@@ -31,13 +31,13 @@\n pyfile.name = \".\".join(temp)\n \n #: Get account data\n- user, data = self.account.select()\n+ user, info = self.account.select()\n \n #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)\n data = json_loads(self.load(\"http://api.premiumize.me/pm-api/v1.php\", #@TODO: Revert to `https` in 0.4.10\n get={'method' : \"directdownloadlink\",\n 'params[login]': user,\n- 'params[pass]' : data['password'],\n+ 'params[pass]' : info['login']['password'],\n 'params[link]' : pyfile.url}))\n \n #: Check status and decide what to do\n", "issue": "premiumize.me hook is broken\naccount says username and password is ok\n\nbut the log always shows:\n\n4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n\nso i guess the hook is broken\n\npremiumize.me hook is broken\naccount says username and password is ok\n\nbut the log always shows:\n\n4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n\nso i guess the hook is broken\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.internal.MultiHoster import MultiHoster, create_getInfo\n\n\nclass PremiumizeMe(MultiHoster):\n __name__ = \"PremiumizeMe\"\n __type__ = \"hoster\"\n __version__ = \"0.19\"\n __status__ = \"testing\"\n\n __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n (\"revertfailed\", \"bool\", \"Revert to standard download if fails\", True)]\n\n __description__ = \"\"\"Premiumize.me multi-hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Florian Franzen\", \"[email protected]\")]\n\n\n def handle_premium(self, pyfile):\n #: In some cases hostsers do not supply us with a 
filename at download, so we\n #: Are going to set a fall back filename (e.g. for freakshare or xfileshare)\n pyfile.name = pyfile.name.split('/').pop() #: Remove everthing before last slash\n\n #: Correction for automatic assigned filename: Removing html at end if needed\n suffix_to_remove = [\"html\", \"htm\", \"php\", \"php3\", \"asp\", \"shtm\", \"shtml\", \"cfml\", \"cfm\"]\n temp = pyfile.name.split('.')\n if temp.pop() in suffix_to_remove:\n pyfile.name = \".\".join(temp)\n\n #: Get account data\n user, data = self.account.select()\n\n #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)\n data = json_loads(self.load(\"http://api.premiumize.me/pm-api/v1.php\", #@TODO: Revert to `https` in 0.4.10\n get={'method' : \"directdownloadlink\",\n 'params[login]': user,\n 'params[pass]' : data['password'],\n 'params[link]' : pyfile.url}))\n\n #: Check status and decide what to do\n status = data['status']\n\n if status == 200:\n self.link = data['result']['location']\n return\n\n elif status == 400:\n self.fail(_(\"Invalid link\"))\n\n elif status == 404:\n self.offline()\n\n elif status >= 500:\n self.temp_offline()\n\n else:\n self.fail(data['statusmessage'])\n\n\ngetInfo = create_getInfo(PremiumizeMe)\n", "path": "module/plugins/hoster/PremiumizeMe.py"}]}
1,898
349
gh_patches_debug_22076
rasdani/github-patches
git_diff
netbox-community__netbox-16229
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User and group queries are not properly restricted via GraphQL API in v4.0.2 Re-Open ### Deployment Type Self-hosted ### NetBox Version v4.0.2 ### Python Version 3.10 ### Steps to Reproduce This is is to re-opent #7814 Create New Group netbox-graphql. Don't add any permission to the group. Add new user to the group Login as new user Access https://netbox/graphql query { user_list{ username password } } Username and hash in password returned. ### Expected Behavior Empty result retured because the user in a group without permission to Group/User view. ### Observed Behavior All Username and hash in Database returned. </issue> <code> [start of netbox/users/graphql/types.py] 1 from typing import List 2 3 import strawberry 4 import strawberry_django 5 from django.contrib.auth import get_user_model 6 from django.contrib.auth.models import Group 7 from strawberry import auto 8 from users import filtersets 9 from users.models import Group 10 from utilities.querysets import RestrictedQuerySet 11 from .filters import * 12 13 __all__ = ( 14 'GroupType', 15 'UserType', 16 ) 17 18 19 @strawberry_django.type( 20 Group, 21 fields=['id', 'name'], 22 filters=GroupFilter 23 ) 24 class GroupType: 25 pass 26 27 28 @strawberry_django.type( 29 get_user_model(), 30 fields=[ 31 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 32 'is_active', 'date_joined', 'groups', 33 ], 34 filters=UserFilter 35 ) 36 class UserType: 37 groups: List[GroupType] 38 [end of netbox/users/graphql/types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/users/graphql/types.py b/netbox/users/graphql/types.py --- a/netbox/users/graphql/types.py +++ b/netbox/users/graphql/types.py @@ -1,13 +1,10 @@ from typing import List -import strawberry import strawberry_django from django.contrib.auth import get_user_model -from django.contrib.auth.models import Group -from strawberry import auto -from users import filtersets + +from netbox.graphql.types import BaseObjectType from users.models import Group -from utilities.querysets import RestrictedQuerySet from .filters import * __all__ = ( @@ -21,17 +18,16 @@ fields=['id', 'name'], filters=GroupFilter ) -class GroupType: +class GroupType(BaseObjectType): pass @strawberry_django.type( get_user_model(), fields=[ - 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', - 'is_active', 'date_joined', 'groups', + 'id', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined', 'groups', ], filters=UserFilter ) -class UserType: +class UserType(BaseObjectType): groups: List[GroupType]
{"golden_diff": "diff --git a/netbox/users/graphql/types.py b/netbox/users/graphql/types.py\n--- a/netbox/users/graphql/types.py\n+++ b/netbox/users/graphql/types.py\n@@ -1,13 +1,10 @@\n from typing import List\n \n-import strawberry\n import strawberry_django\n from django.contrib.auth import get_user_model\n-from django.contrib.auth.models import Group\n-from strawberry import auto\n-from users import filtersets\n+\n+from netbox.graphql.types import BaseObjectType\n from users.models import Group\n-from utilities.querysets import RestrictedQuerySet\n from .filters import *\n \n __all__ = (\n@@ -21,17 +18,16 @@\n fields=['id', 'name'],\n filters=GroupFilter\n )\n-class GroupType:\n+class GroupType(BaseObjectType):\n pass\n \n \n @strawberry_django.type(\n get_user_model(),\n fields=[\n- 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff',\n- 'is_active', 'date_joined', 'groups',\n+ 'id', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined', 'groups',\n ],\n filters=UserFilter\n )\n-class UserType:\n+class UserType(BaseObjectType):\n groups: List[GroupType]\n", "issue": "User and group queries are not properly restricted via GraphQL API in v4.0.2 Re-Open\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv4.0.2\n\n### Python Version\n\n3.10\n\n### Steps to Reproduce\n\nThis is is to re-opent #7814\r\n\r\nCreate New Group netbox-graphql. Don't add any permission to the group.\r\nAdd new user to the group\r\nLogin as new user\r\nAccess https://netbox/graphql\r\n\r\nquery {\r\n user_list{\r\n username\r\n password\r\n }\r\n }\r\n\r\nUsername and hash in password returned.\r\n\r\n\n\n### Expected Behavior\n\nEmpty result retured because the user in a group without permission to Group/User view.\n\n### Observed Behavior\n\nAll Username and hash in Database returned.\n", "before_files": [{"content": "from typing import List\n\nimport strawberry\nimport strawberry_django\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom strawberry import auto\nfrom users import filtersets\nfrom users.models import Group\nfrom utilities.querysets import RestrictedQuerySet\nfrom .filters import *\n\n__all__ = (\n 'GroupType',\n 'UserType',\n)\n\n\n@strawberry_django.type(\n Group,\n fields=['id', 'name'],\n filters=GroupFilter\n)\nclass GroupType:\n pass\n\n\n@strawberry_django.type(\n get_user_model(),\n fields=[\n 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff',\n 'is_active', 'date_joined', 'groups',\n ],\n filters=UserFilter\n)\nclass UserType:\n groups: List[GroupType]\n", "path": "netbox/users/graphql/types.py"}]}
959
288
gh_patches_debug_7311
rasdani/github-patches
git_diff
freedomofpress__securedrop-4644
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> replace "hidden service" occurrences ## Status ready for review ## Description of Changes Changes Proposed: - no longer refer to [Onion Services](https://2019.www.torproject.org/docs/onion-services.html.en) as hidden services; - there are NO new images I added, it's just text; - all changed content here is either just a comment (playbook, or shell script); - changelog was kept as is. ## Testing I followed the _(slightly outdated)_ [Documentation Guidelines](https://docs.securedrop.org/en/latest/development/documentation_guidelines.html), and all looked fine: ``` # make docs ``` Gave me the following: ``` ... | copying static files... done | copying extra files... done | dumping search index in English (code: en) ... done | dumping object inventory... done | build succeeded. +-------------------------------------------------------------------------------- [I 190725 16:16:16 server:296] Serving on http://127.0.0.1:8000 [I 190725 16:16:16 handlers:62] Start watching changes [I 190725 16:16:16 handlers:64] Start detecting changes ``` `make docs-linkcheck` returned an error, but that's not related to the changes made here. `docs-lint` ran just fine. ## Deployment Any special considerations for deployment? - AFAIK, no. ## Checklist ### If you made changes to the server application code: - [ ] Linting (`make lint`) and tests (`make -C securedrop test`) pass in the development container ### If you made changes to `securedrop-admin`: - [ ] Linting and tests (`make -C admin test`) pass in the admin development container ### If you made changes to the system configuration: - [ ] [Configuration tests](https://docs.securedrop.org/en/latest/development/testing_configuration_tests.html) pass ### If you made non-trivial code changes: - [ ] I have written a test plan and validated it for this PR ### If you made changes to documentation: - [x] Doc linting (`make docs-lint`) passed locally </issue> <code> [start of install_files/ansible-base/roles/backup/files/0.3_collect.py] 1 #!/usr/bin/python2.7 2 """ 3 4 This script should be copied to the App server and ran by the anisble 5 plabook. When run (as root), it collects all of the necessary information 6 to backup the 0.3 system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.zip.gpg 7 8 """ 9 10 import sys 11 import os 12 import io 13 import zipfile 14 from datetime import datetime 15 # Import the application config.py file 16 sys.path.append("/var/www/securedrop") 17 import config # noqa: F403 18 import gnupg # noqa: F403 19 20 TOR_SERVICES = "/var/lib/tor/services" 21 TOR_CONFIG = "/etc/tor/torrc" 22 23 24 def collect_config_file(zf): 25 config_file_path = os.path.join(config.SECUREDROP_ROOT, "config.py") 26 zf.write(config_file_path) 27 28 29 def collect_securedrop_data_root(zf): 30 # The store and key dirs are shared between both interfaces 31 for root, dirs, files in os.walk(config.SECUREDROP_DATA_ROOT): 32 for name in files: 33 zf.write(os.path.join(root, name)) 34 35 36 def collect_custom_header_image(zf): 37 # The custom header image is copied over the deafult `static/i/logo.png`. 
38 zf.write(os.path.join(config.SECUREDROP_ROOT, "static/i/logo.png")) 39 40 41 def collect_tor_files(zf): 42 # All of the tor hidden service private keys are stored in the THS specific 43 # subdirectory `/var/lib/tor/services` backing up this directory will back 44 # up all of the THS and ATHS required keys needed to restore all the hidden 45 # services on that system. 46 for root, dirs, files in os.walk(TOR_SERVICES): 47 for name in files: 48 zf.write(os.path.join(root, name)) 49 50 # The tor config file has the ATHS client names required to restore 51 # the ATHS info. These names are also in the the specific client_key file 52 # but backing up this file makes it easier than parsing the files during a 53 # restore. 54 zf.write(TOR_CONFIG) 55 56 57 def encrypt_zip_file(zf_fn): 58 # Encrypt the backup zip file with the application's gpg public key 59 gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR) 60 e_fn = '{}.gpg'.format(zf_fn) 61 62 stream = io.open(zf_fn, "rb") 63 gpg.encrypt_file(stream, config.JOURNALIST_KEY, always_trust='True', 64 output=e_fn) 65 66 67 def main(): 68 # name append a timestamp to the sd-backup zip filename 69 dt = str(datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S")) 70 zf_fn = 'sd-backup-{}.zip'.format(dt) 71 with zipfile.ZipFile(zf_fn, 'w') as zf: 72 collect_config_file(zf) 73 collect_securedrop_data_root(zf) 74 collect_custom_header_image(zf) 75 collect_tor_files(zf) 76 encrypt_zip_file(zf_fn) 77 print(zf_fn) 78 79 80 if __name__ == "__main__": 81 main() 82 [end of install_files/ansible-base/roles/backup/files/0.3_collect.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/install_files/ansible-base/roles/backup/files/0.3_collect.py b/install_files/ansible-base/roles/backup/files/0.3_collect.py --- a/install_files/ansible-base/roles/backup/files/0.3_collect.py +++ b/install_files/ansible-base/roles/backup/files/0.3_collect.py @@ -39,7 +39,7 @@ def collect_tor_files(zf): - # All of the tor hidden service private keys are stored in the THS specific + # All of the tor Onion Service private keys are stored in the THS specific # subdirectory `/var/lib/tor/services` backing up this directory will back # up all of the THS and ATHS required keys needed to restore all the hidden # services on that system.
{"golden_diff": "diff --git a/install_files/ansible-base/roles/backup/files/0.3_collect.py b/install_files/ansible-base/roles/backup/files/0.3_collect.py\n--- a/install_files/ansible-base/roles/backup/files/0.3_collect.py\n+++ b/install_files/ansible-base/roles/backup/files/0.3_collect.py\n@@ -39,7 +39,7 @@\n \n \n def collect_tor_files(zf):\n- # All of the tor hidden service private keys are stored in the THS specific\n+ # All of the tor Onion Service private keys are stored in the THS specific\n # subdirectory `/var/lib/tor/services` backing up this directory will back\n # up all of the THS and ATHS required keys needed to restore all the hidden\n # services on that system.\n", "issue": "replace \"hidden service\" occurrences\n## Status\r\n\r\nready for review\r\n\r\n## Description of Changes\r\n\r\nChanges Proposed:\r\n\r\n - no longer refer to [Onion Services](https://2019.www.torproject.org/docs/onion-services.html.en) as hidden services;\r\n - there are NO new images I added, it's just text;\r\n - all changed content here is either just a comment (playbook, or shell script);\r\n - changelog was kept as is.\r\n\r\n## Testing\r\n\r\nI followed the _(slightly outdated)_ [Documentation Guidelines](https://docs.securedrop.org/en/latest/development/documentation_guidelines.html), and all looked fine:\r\n\r\n```\r\n# make docs\r\n```\r\n\r\nGave me the following:\r\n\r\n```\r\n ...\r\n\r\n| copying static files... done\r\n| copying extra files... done\r\n| dumping search index in English (code: en) ... done\r\n| dumping object inventory... done\r\n| build succeeded.\r\n+--------------------------------------------------------------------------------\r\n\r\n[I 190725 16:16:16 server:296] Serving on http://127.0.0.1:8000\r\n[I 190725 16:16:16 handlers:62] Start watching changes\r\n[I 190725 16:16:16 handlers:64] Start detecting changes\r\n```\r\n`make docs-linkcheck` returned an error, but that's not related to the changes made here. `docs-lint` ran just fine.\r\n\r\n## Deployment\r\n\r\nAny special considerations for deployment?\r\n\r\n - AFAIK, no.\r\n\r\n## Checklist\r\n\r\n### If you made changes to the server application code:\r\n\r\n- [ ] Linting (`make lint`) and tests (`make -C securedrop test`) pass in the development container\r\n\r\n### If you made changes to `securedrop-admin`:\r\n\r\n- [ ] Linting and tests (`make -C admin test`) pass in the admin development container\r\n\r\n### If you made changes to the system configuration:\r\n\r\n- [ ] [Configuration tests](https://docs.securedrop.org/en/latest/development/testing_configuration_tests.html) pass\r\n\r\n### If you made non-trivial code changes:\r\n\r\n- [ ] I have written a test plan and validated it for this PR\r\n\r\n### If you made changes to documentation:\r\n\r\n- [x] Doc linting (`make docs-lint`) passed locally\r\n\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\n\nThis script should be copied to the App server and ran by the anisble\nplabook. 
When run (as root), it collects all of the necessary information\nto backup the 0.3 system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.zip.gpg\n\n\"\"\"\n\nimport sys\nimport os\nimport io\nimport zipfile\nfrom datetime import datetime\n# Import the application config.py file\nsys.path.append(\"/var/www/securedrop\")\nimport config # noqa: F403\nimport gnupg # noqa: F403\n\nTOR_SERVICES = \"/var/lib/tor/services\"\nTOR_CONFIG = \"/etc/tor/torrc\"\n\n\ndef collect_config_file(zf):\n config_file_path = os.path.join(config.SECUREDROP_ROOT, \"config.py\")\n zf.write(config_file_path)\n\n\ndef collect_securedrop_data_root(zf):\n # The store and key dirs are shared between both interfaces\n for root, dirs, files in os.walk(config.SECUREDROP_DATA_ROOT):\n for name in files:\n zf.write(os.path.join(root, name))\n\n\ndef collect_custom_header_image(zf):\n # The custom header image is copied over the deafult `static/i/logo.png`.\n zf.write(os.path.join(config.SECUREDROP_ROOT, \"static/i/logo.png\"))\n\n\ndef collect_tor_files(zf):\n # All of the tor hidden service private keys are stored in the THS specific\n # subdirectory `/var/lib/tor/services` backing up this directory will back\n # up all of the THS and ATHS required keys needed to restore all the hidden\n # services on that system.\n for root, dirs, files in os.walk(TOR_SERVICES):\n for name in files:\n zf.write(os.path.join(root, name))\n\n # The tor config file has the ATHS client names required to restore\n # the ATHS info. These names are also in the the specific client_key file\n # but backing up this file makes it easier than parsing the files during a\n # restore.\n zf.write(TOR_CONFIG)\n\n\ndef encrypt_zip_file(zf_fn):\n # Encrypt the backup zip file with the application's gpg public key\n gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)\n e_fn = '{}.gpg'.format(zf_fn)\n\n stream = io.open(zf_fn, \"rb\")\n gpg.encrypt_file(stream, config.JOURNALIST_KEY, always_trust='True',\n output=e_fn)\n\n\ndef main():\n # name append a timestamp to the sd-backup zip filename\n dt = str(datetime.utcnow().strftime(\"%Y-%m-%d--%H-%M-%S\"))\n zf_fn = 'sd-backup-{}.zip'.format(dt)\n with zipfile.ZipFile(zf_fn, 'w') as zf:\n collect_config_file(zf)\n collect_securedrop_data_root(zf)\n collect_custom_header_image(zf)\n collect_tor_files(zf)\n encrypt_zip_file(zf_fn)\n print(zf_fn)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/backup/files/0.3_collect.py"}]}
1,914
181
gh_patches_debug_7166
rasdani/github-patches
git_diff
pytorch__vision-7665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug when using PIL backend in classification references ### 🐛 Describe the bug When I try to train a model using the train.py script under references/classification with the PIL backend, I encounter an error: ```bash ValueError: backend can be 'tensor' or 'pil', but got pil ``` To reproduce this issue, you can write: ```bash git clone https://github.com/pytorch/vision && cd vision conda create -n vision_env python=3.9 conda activate vision_env pip install torch==1.13.1 torchvision cd references/classification/ python train.py --data-path "path-to-dataset" --test-only --backend pil ``` ### Versions [pip3] mypy-extensions==1.0.0 [pip3] numpy==1.24.3 [pip3] torch==1.13.1 [pip3] torchvision==0.14.1 [conda] numpy 1.24.3 pypi_0 pypi [conda] torch 1.13.1 pypi_0 pypi [conda] torchvision 0.14.1 pypi_0 pypi </issue> <code> [start of references/classification/presets.py] 1 import torch 2 from torchvision.transforms import autoaugment, transforms 3 from torchvision.transforms.functional import InterpolationMode 4 5 6 class ClassificationPresetTrain: 7 def __init__( 8 self, 9 *, 10 crop_size, 11 mean=(0.485, 0.456, 0.406), 12 std=(0.229, 0.224, 0.225), 13 interpolation=InterpolationMode.BILINEAR, 14 hflip_prob=0.5, 15 auto_augment_policy=None, 16 ra_magnitude=9, 17 augmix_severity=3, 18 random_erase_prob=0.0, 19 backend="pil", 20 ): 21 trans = [] 22 backend = backend.lower() 23 if backend == "tensor": 24 trans.append(transforms.PILToTensor()) 25 elif backend != "pil": 26 raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") 27 28 trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True)) 29 if hflip_prob > 0: 30 trans.append(transforms.RandomHorizontalFlip(hflip_prob)) 31 if auto_augment_policy is not None: 32 if auto_augment_policy == "ra": 33 trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude)) 34 elif auto_augment_policy == "ta_wide": 35 trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation)) 36 elif auto_augment_policy == "augmix": 37 trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity)) 38 else: 39 aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy) 40 trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation)) 41 42 if backend == "pil": 43 trans.append(transforms.PILToTensor()) 44 45 trans.extend( 46 [ 47 transforms.ConvertImageDtype(torch.float), 48 transforms.Normalize(mean=mean, std=std), 49 ] 50 ) 51 if random_erase_prob > 0: 52 trans.append(transforms.RandomErasing(p=random_erase_prob)) 53 54 self.transforms = transforms.Compose(trans) 55 56 def __call__(self, img): 57 return self.transforms(img) 58 59 60 class ClassificationPresetEval: 61 def __init__( 62 self, 63 *, 64 crop_size, 65 resize_size=256, 66 mean=(0.485, 0.456, 0.406), 67 std=(0.229, 0.224, 0.225), 68 interpolation=InterpolationMode.BILINEAR, 69 backend="pil", 70 ): 71 trans = [] 72 73 backend = backend.lower() 74 if backend == "tensor": 75 trans.append(transforms.PILToTensor()) 76 else: 77 raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") 78 79 trans += [ 80 transforms.Resize(resize_size, interpolation=interpolation, antialias=True), 81 transforms.CenterCrop(crop_size), 82 ] 83 84 if backend == "pil": 85 trans.append(transforms.PILToTensor()) 86 87 trans += [ 88 transforms.ConvertImageDtype(torch.float), 89 
transforms.Normalize(mean=mean, std=std), 90 ] 91 92 self.transforms = transforms.Compose(trans) 93 94 def __call__(self, img): 95 return self.transforms(img) 96 [end of references/classification/presets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/references/classification/presets.py b/references/classification/presets.py --- a/references/classification/presets.py +++ b/references/classification/presets.py @@ -69,11 +69,10 @@ backend="pil", ): trans = [] - backend = backend.lower() if backend == "tensor": trans.append(transforms.PILToTensor()) - else: + elif backend != "pil": raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") trans += [
{"golden_diff": "diff --git a/references/classification/presets.py b/references/classification/presets.py\n--- a/references/classification/presets.py\n+++ b/references/classification/presets.py\n@@ -69,11 +69,10 @@\n backend=\"pil\",\n ):\n trans = []\n-\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n- else:\n+ elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n \n trans += [\n", "issue": "bug when using PIL backend in classification references\n### \ud83d\udc1b Describe the bug\n\nWhen I try to train a model using the train.py script under references/classification with the PIL backend, I encounter an error:\r\n```bash\r\nValueError: backend can be 'tensor' or 'pil', but got pil\r\n```\r\n\r\nTo reproduce this issue, you can write:\r\n```bash\r\ngit clone https://github.com/pytorch/vision && cd vision\r\nconda create -n vision_env python=3.9\r\nconda activate vision_env\r\npip install torch==1.13.1 torchvision\r\ncd references/classification/\r\npython train.py --data-path \"path-to-dataset\" --test-only --backend pil\r\n```\n\n### Versions\n\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.3\r\n[pip3] torch==1.13.1\r\n[pip3] torchvision==0.14.1\r\n[conda] numpy 1.24.3 pypi_0 pypi\r\n[conda] torch 1.13.1 pypi_0 pypi\r\n[conda] torchvision 0.14.1 pypi_0 pypi\n", "before_files": [{"content": "import torch\nfrom torchvision.transforms import autoaugment, transforms\nfrom torchvision.transforms.functional import InterpolationMode\n\n\nclass ClassificationPresetTrain:\n def __init__(\n self,\n *,\n crop_size,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n hflip_prob=0.5,\n auto_augment_policy=None,\n ra_magnitude=9,\n augmix_severity=3,\n random_erase_prob=0.0,\n backend=\"pil\",\n ):\n trans = []\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))\n if hflip_prob > 0:\n trans.append(transforms.RandomHorizontalFlip(hflip_prob))\n if auto_augment_policy is not None:\n if auto_augment_policy == \"ra\":\n trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))\n elif auto_augment_policy == \"ta_wide\":\n trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))\n elif auto_augment_policy == \"augmix\":\n trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))\n else:\n aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)\n trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans.extend(\n [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n )\n if random_erase_prob > 0:\n trans.append(transforms.RandomErasing(p=random_erase_prob))\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n\n\nclass ClassificationPresetEval:\n def __init__(\n self,\n *,\n crop_size,\n resize_size=256,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n backend=\"pil\",\n ):\n trans = []\n\n backend = backend.lower()\n if backend == \"tensor\":\n 
trans.append(transforms.PILToTensor())\n else:\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans += [\n transforms.Resize(resize_size, interpolation=interpolation, antialias=True),\n transforms.CenterCrop(crop_size),\n ]\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans += [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n", "path": "references/classification/presets.py"}]}
1,725
134
gh_patches_debug_20997
rasdani/github-patches
git_diff
microsoft__presidio-259
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> crypto_recognizer throws an exception When calling the engine analyze API like ``` response = engine.analyze(correlation_id=0, text=text_to_analyze, language='en', entities=[], all_fields=True, score_threshold=0.5) ``` and the value of 'text_to_analyze' is "/boardingPass/v1/devices/34e7b5e1a0aa1d6f3d862b52a289cdb7/registrations/pass.apoc.wallet/" The exception below is thrown ` File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/analyzer_engine.py", line 204, in analyze current_results = recognizer.analyze(text, entities, nlp_artifacts) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py", line 61, in analyze pattern_result = self.__analyze_patterns(text) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py", line 144, in __analyze_patterns validation_result = self.validate_result(current_match) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py", line 23, in validate_result bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py", line 33, in __decode_base58 n = n * 58 + digits58.index(char)` ValueError: substring not found </issue> <code> [start of presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py] 1 from hashlib import sha256 2 from analyzer import Pattern 3 from analyzer import PatternRecognizer 4 5 # Copied from: 6 # http://rosettacode.org/wiki/Bitcoin/address_validation#Python 7 REGEX = r'\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\b' 8 CONTEXT = ["wallet", "btc", "bitcoin", "crypto"] 9 10 11 class CryptoRecognizer(PatternRecognizer): 12 """ 13 Recognizes common crypto account numbers using regex + checksum 14 """ 15 16 def __init__(self): 17 patterns = [Pattern('Crypto (Medium)', REGEX, 0.5)] 18 super().__init__(supported_entity="CRYPTO", patterns=patterns, 19 context=CONTEXT) 20 21 def validate_result(self, pattern_text): 22 # try: 23 bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) 24 result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) 25 .digest()).digest()[:4] 26 return result 27 28 @staticmethod 29 def __decode_base58(bc, length): 30 digits58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' 31 n = 0 32 for char in bc: 33 n = n * 58 + digits58.index(char) 34 return n.to_bytes(length, 'big') 35 [end of presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py --- a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py +++ b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py @@ -4,7 +4,7 @@ # Copied from: # http://rosettacode.org/wiki/Bitcoin/address_validation#Python -REGEX = r'\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\b' +REGEX = r'\b[13][a-km-zA-HJ-NP-Z1-9]{26,33}\b' CONTEXT = ["wallet", "btc", "bitcoin", "crypto"] @@ -19,11 +19,12 @@ context=CONTEXT) def validate_result(self, pattern_text): - # try: - bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) - result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) - .digest()).digest()[:4] - return result + try: + bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) + return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) + .digest()).digest()[:4] + except ValueError: + return False @staticmethod def __decode_base58(bc, length):
{"golden_diff": "diff --git a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n--- a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n+++ b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n@@ -4,7 +4,7 @@\n \n # Copied from:\n # http://rosettacode.org/wiki/Bitcoin/address_validation#Python\n-REGEX = r'\\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\\b'\n+REGEX = r'\\b[13][a-km-zA-HJ-NP-Z1-9]{26,33}\\b'\n CONTEXT = [\"wallet\", \"btc\", \"bitcoin\", \"crypto\"]\n \n \n@@ -19,11 +19,12 @@\n context=CONTEXT)\n \n def validate_result(self, pattern_text):\n- # try:\n- bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n- result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n- .digest()).digest()[:4]\n- return result\n+ try:\n+ bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n+ return bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n+ .digest()).digest()[:4]\n+ except ValueError:\n+ return False\n \n @staticmethod\n def __decode_base58(bc, length):\n", "issue": "crypto_recognizer throws an exception\n\r\nWhen calling the engine analyze API like\r\n\r\n```\r\n response = engine.analyze(correlation_id=0,\r\n text=text_to_analyze,\r\n language='en',\r\n entities=[],\r\n all_fields=True,\r\n score_threshold=0.5)\r\n```\r\n\r\nand the value of 'text_to_analyze' is \r\n\r\n\"/boardingPass/v1/devices/34e7b5e1a0aa1d6f3d862b52a289cdb7/registrations/pass.apoc.wallet/\"\r\n\r\nThe exception below is thrown\r\n\r\n\r\n` File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/analyzer_engine.py\", line 204, in analyze\r\n current_results = recognizer.analyze(text, entities, nlp_artifacts)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py\", line 61, in analyze\r\n pattern_result = self.__analyze_patterns(text)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py\", line 144, in __analyze_patterns\r\n validation_result = self.validate_result(current_match)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py\", line 23, in validate_result\r\n bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py\", line 33, in __decode_base58\r\n n = n * 58 + digits58.index(char)`\r\n\r\nValueError: substring not found\n", "before_files": [{"content": "from hashlib import sha256\nfrom analyzer import Pattern\nfrom analyzer import PatternRecognizer\n\n# Copied from:\n# http://rosettacode.org/wiki/Bitcoin/address_validation#Python\nREGEX = r'\\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\\b'\nCONTEXT = [\"wallet\", \"btc\", \"bitcoin\", \"crypto\"]\n\n\nclass CryptoRecognizer(PatternRecognizer):\n \"\"\"\n Recognizes common crypto account numbers using regex + checksum\n \"\"\"\n\n def __init__(self):\n patterns = [Pattern('Crypto (Medium)', REGEX, 0.5)]\n super().__init__(supported_entity=\"CRYPTO\", patterns=patterns,\n context=CONTEXT)\n\n def validate_result(self, pattern_text):\n # try:\n bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n .digest()).digest()[:4]\n return result\n\n @staticmethod\n def __decode_base58(bc, length):\n digits58 = 
'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for char in bc:\n n = n * 58 + digits58.index(char)\n return n.to_bytes(length, 'big')\n", "path": "presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py"}]}
1,343
369