problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-9.01k | golden_diff stringlengths 151-4.94k | verification_info stringlengths 465-11.3k | num_tokens_prompt int64 557-2.05k | num_tokens_diff int64 48-1.02k |
---|---|---|---|---|---|---|---|---|
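The schema line above is the dataset viewer's column summary: each column's name, its dtype, and the observed minimum/maximum string length or integer value. Below is a minimal, hypothetical sketch of how rows like the ones that follow could be loaded and inspected with the Hugging Face `datasets` library. The repository id is taken from the `source` column and the split name is a guess; both are assumptions, not something stated by this preview.

```python
# Minimal sketch, not an official loading recipe.
# Assumptions: the preview below is hosted as a Hugging Face dataset whose repo id
# matches the `source` column ("rasdani/github-patches") and exposes a "train" split.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # hypothetical repo id / split

row = ds[0]
print(row["problem_id"], row["in_source_id"])  # e.g. a gh_patches_debug_* id and the upstream issue id
print(row["prompt"][:300])        # issue statement plus partial code base given to the model
print(row["golden_diff"][:300])   # reference patch in `git apply` format

# `verification_info` is stored as a string; in this preview it looks like JSON with
# the golden diff, the issue text, and the pre-patch file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
for f in info.get("before_files", []):
    print(f["path"], len(f["content"]))
```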
gh_patches_debug_32237 | rasdani/github-patches | git_diff | dmlc__dgl-5059 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Sparse] Create a mock implementation in mock_sparse for BSDDMM.
## 🔨Work Item
**IMPORTANT:**
* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<!-- short description of the work item -->
## Depending work items or issues
<!-- what must be done before this -->
</issue>
<code>
[start of python/dgl/mock_sparse/sddmm.py]
1 """Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
2 import torch
3
4 from .sp_matrix import SparseMatrix
5
6 __all__ = ["sddmm"]
7
8
9 def sddmm(
10 A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
11 ) -> SparseMatrix:
12 r"""Sampled-Dense-Dense Matrix Multiplication (SDDMM).
13
14 ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``
15 at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``
16 is added to the resulting matrix.
17
18 Mathematically ``sddmm`` is formulated as:
19
20 .. math::
21 out = (mat1 @ mat2) * spy(A) + A
22
23 Parameters
24 ----------
25 A : SparseMatrix
26 Sparse matrix of shape `(M, N)`.
27 mat1 : Tensor
28 Dense matrix of shape `(M, K)`
29 mat2 : Tensor
30 Dense matrix of shape `(K, N)`
31
32 Returns
33 -------
34 SparseMatrix
35 Sparse matrix of shape `(M, N)`.
36
37 Examples
38 --------
39
40 >>> row = torch.Tensor([1, 1, 2])
41 >>> col = torch.Tensor([2, 3, 3])
42 >>> val = torch.arange(1, 4).float()
43 >>> A = SparseMatrix(row, col, val, (3, 4))
44 >>> mat1 = torch.randn(3, 5)
45 >>> mat2 = torch.randn(5, 4)
46 >>> dgl.mock_sparse.sddmm(A, mat1, mat2)
47 SparseMatrix(indices=tensor([[1, 1, 2],
48 [2, 3, 3]]),
49 values=tensor([1.8035, 2.3375, 3.1255]),
50 shape=(3, 4), nnz=3)
51 """
52 assert A.val.dim() == 1, (
53 f"Nonzero elements have values of shape ({A.val.shape[1]}). Expects "
54 "scalar values. "
55 )
56 # PyTorch's sddmm operator only supports CSR format.
57 res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
58 return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
59
[end of python/dgl/mock_sparse/sddmm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py
--- a/python/dgl/mock_sparse/sddmm.py
+++ b/python/dgl/mock_sparse/sddmm.py
@@ -1,9 +1,9 @@
"""Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
import torch
-from .sp_matrix import SparseMatrix
+from .sp_matrix import create_from_coo, SparseMatrix
-__all__ = ["sddmm"]
+__all__ = ["sddmm", "mock_bsddmm"]
def sddmm(
@@ -56,3 +56,56 @@
# PyTorch's sddmm operator only supports CSR format.
res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
+
+
+def mock_bsddmm(
+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
+) -> SparseMatrix:
+ r"""Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).
+
+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices
+ independently.
+
+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be
+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.
+
+ Parameters
+ ----------
+ A : SparseMatrix
+ Sparse matrix of shape `(M, N)`.
+ mat1 : Tensor
+ Dense matrix of shape `(B, M, K)` or `(B, M,)`
+ mat2 : Tensor
+ Dense matrix of shape `(B, K, N)` or `(B, K,)`
+
+ Returns
+ -------
+ SparseMatrix
+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.
+
+ Examples
+ --------
+
+ >>> row = torch.tensor([1, 1, 2])
+ >>> col = torch.tensor([2, 3, 3])
+ >>> val = torch.arange(1, 4).float()
+ >>> A = create_from_coo(row, col, val, (3, 4))
+ >>> mat1 = torch.randn(2, 3, 5)
+ >>> mat2 = torch.randn(2, 5, 4)
+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)
+ SparseMatrix(indices=tensor([[1, 1, 2],
+ [2, 3, 3]]),
+ values=tensor([[-0.6765, -0.4017],
+ [ 3.3290, 6.9016],
+ [ 4.8184, 5.8882]]),
+ shape=(3, 4), nnz=3)
+ """
+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]
+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]
+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]
+ return create_from_coo(
+ row=A.row,
+ col=A.col,
+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),
+ shape=A.shape,
+ )
| {"golden_diff": "diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py\n--- a/python/dgl/mock_sparse/sddmm.py\n+++ b/python/dgl/mock_sparse/sddmm.py\n@@ -1,9 +1,9 @@\n \"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\n import torch\n \n-from .sp_matrix import SparseMatrix\n+from .sp_matrix import create_from_coo, SparseMatrix\n \n-__all__ = [\"sddmm\"]\n+__all__ = [\"sddmm\", \"mock_bsddmm\"]\n \n \n def sddmm(\n@@ -56,3 +56,56 @@\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n+\n+\n+def mock_bsddmm(\n+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n+) -> SparseMatrix:\n+ r\"\"\"Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n+\n+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices\n+ independently.\n+\n+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be\n+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.\n+\n+ Parameters\n+ ----------\n+ A : SparseMatrix\n+ Sparse matrix of shape `(M, N)`.\n+ mat1 : Tensor\n+ Dense matrix of shape `(B, M, K)` or `(B, M,)`\n+ mat2 : Tensor\n+ Dense matrix of shape `(B, K, N)` or `(B, K,)`\n+\n+ Returns\n+ -------\n+ SparseMatrix\n+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.\n+\n+ Examples\n+ --------\n+\n+ >>> row = torch.tensor([1, 1, 2])\n+ >>> col = torch.tensor([2, 3, 3])\n+ >>> val = torch.arange(1, 4).float()\n+ >>> A = create_from_coo(row, col, val, (3, 4))\n+ >>> mat1 = torch.randn(2, 3, 5)\n+ >>> mat2 = torch.randn(2, 5, 4)\n+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)\n+ SparseMatrix(indices=tensor([[1, 1, 2],\n+ [2, 3, 3]]),\n+ values=tensor([[-0.6765, -0.4017],\n+ [ 3.3290, 6.9016],\n+ [ 4.8184, 5.8882]]),\n+ shape=(3, 4), nnz=3)\n+ \"\"\"\n+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]\n+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]\n+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]\n+ return create_from_coo(\n+ row=A.row,\n+ col=A.col,\n+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),\n+ shape=A.shape,\n+ )\n", "issue": "[Sparse] Create a mock implementation in mock_sparse for BSDDMM.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\nimport torch\n\nfrom .sp_matrix import SparseMatrix\n\n__all__ = [\"sddmm\"]\n\n\ndef sddmm(\n A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n) -> SparseMatrix:\n r\"\"\"Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n\n ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``\n at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``\n is added to the resulting matrix.\n\n Mathematically ``sddmm`` is formulated as:\n\n .. 
math::\n out = (mat1 @ mat2) * spy(A) + A\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix of shape `(M, N)`.\n mat1 : Tensor\n Dense matrix of shape `(M, K)`\n mat2 : Tensor\n Dense matrix of shape `(K, N)`\n\n Returns\n -------\n SparseMatrix\n Sparse matrix of shape `(M, N)`.\n\n Examples\n --------\n\n >>> row = torch.Tensor([1, 1, 2])\n >>> col = torch.Tensor([2, 3, 3])\n >>> val = torch.arange(1, 4).float()\n >>> A = SparseMatrix(row, col, val, (3, 4))\n >>> mat1 = torch.randn(3, 5)\n >>> mat2 = torch.randn(5, 4)\n >>> dgl.mock_sparse.sddmm(A, mat1, mat2)\n SparseMatrix(indices=tensor([[1, 1, 2],\n [2, 3, 3]]),\n values=tensor([1.8035, 2.3375, 3.1255]),\n shape=(3, 4), nnz=3)\n \"\"\"\n assert A.val.dim() == 1, (\n f\"Nonzero elements have values of shape ({A.val.shape[1]}). Expects \"\n \"scalar values. \"\n )\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n", "path": "python/dgl/mock_sparse/sddmm.py"}]} | 1,312 | 813 |
gh_patches_debug_8938 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1448 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config
In migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.
```console
$ pre-commit autoupdate
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
Check the log at /home/ryan/.cache/pre-commit/pre-commit.log
```
### version information
```
pre-commit version: 2.3.0
sys.version:
3.8.2 (default, Apr 8 2020, 14:31:25)
[GCC 9.3.0]
sys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
```
Traceback (most recent call last):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py", line 56, in error_handler
yield
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py", line 354, in main
return autoupdate(
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py", line 141, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 49, in migrate_config
contents = _migrate_map(contents)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 28, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py", line 114, in load
return loader.get_single_data()
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "ext/_yaml.pyx", line 707, in _yaml.CParser.get_single_node
File "ext/_yaml.pyx", line 726, in _yaml.CParser._compose_document
File "ext/_yaml.pyx", line 905, in _yaml.CParser._parse_next_event
yaml.scanner.ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
</issue>
<code>
[start of pre_commit/commands/migrate_config.py]
1 import re
2
3 import yaml
4
5 from pre_commit.util import yaml_load
6
7
8 def _indent(s: str) -> str:
9 lines = s.splitlines(True)
10 return ''.join(' ' * 4 + line if line.strip() else line for line in lines)
11
12
13 def _is_header_line(line: str) -> bool:
14 return line.startswith(('#', '---')) or not line.strip()
15
16
17 def _migrate_map(contents: str) -> str:
18 # Find the first non-header line
19 lines = contents.splitlines(True)
20 i = 0
21 # Only loop on non empty configuration file
22 while i < len(lines) and _is_header_line(lines[i]):
23 i += 1
24
25 header = ''.join(lines[:i])
26 rest = ''.join(lines[i:])
27
28 if isinstance(yaml_load(contents), list):
29 # If they are using the "default" flow style of yaml, this operation
30 # will yield a valid configuration
31 try:
32 trial_contents = f'{header}repos:\n{rest}'
33 yaml_load(trial_contents)
34 contents = trial_contents
35 except yaml.YAMLError:
36 contents = f'{header}repos:\n{_indent(rest)}'
37
38 return contents
39
40
41 def _migrate_sha_to_rev(contents: str) -> str:
42 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
43
44
45 def migrate_config(config_file: str, quiet: bool = False) -> int:
46 with open(config_file) as f:
47 orig_contents = contents = f.read()
48
49 contents = _migrate_map(contents)
50 contents = _migrate_sha_to_rev(contents)
51
52 if contents != orig_contents:
53 with open(config_file, 'w') as f:
54 f.write(contents)
55
56 print('Configuration has been migrated.')
57 elif not quiet:
58 print('Configuration is already migrated.')
59 return 0
60
[end of pre_commit/commands/migrate_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -2,6 +2,7 @@
import yaml
+from pre_commit.clientlib import load_config
from pre_commit.util import yaml_load
@@ -43,6 +44,9 @@
def migrate_config(config_file: str, quiet: bool = False) -> int:
+ # ensure that the configuration is a valid pre-commit configuration
+ load_config(config_file)
+
with open(config_file) as f:
orig_contents = contents = f.read()
| {"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -2,6 +2,7 @@\n \n import yaml\n \n+from pre_commit.clientlib import load_config\n from pre_commit.util import yaml_load\n \n \n@@ -43,6 +44,9 @@\n \n \n def migrate_config(config_file: str, quiet: bool = False) -> int:\n+ # ensure that the configuration is a valid pre-commit configuration\n+ load_config(config_file)\n+\n with open(config_file) as f:\n orig_contents = contents = f.read()\n", "issue": "Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config\nIn migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.\r\n\r\n```console\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\nCheck the log at /home/ryan/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.3.0\r\nsys.version:\r\n 3.8.2 (default, Apr 8 2020, 14:31:25) \r\n [GCC 9.3.0]\r\nsys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py\", line 354, in main\r\n return autoupdate(\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py\", line 141, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 49, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 28, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py\", line 114, in load\r\n return loader.get_single_data()\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"ext/_yaml.pyx\", line 707, in _yaml.CParser.get_single_node\r\n File \"ext/_yaml.pyx\", line 726, in _yaml.CParser._compose_document\r\n File \"ext/_yaml.pyx\", line 905, in _yaml.CParser._parse_next_event\r\nyaml.scanner.ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\n", "before_files": [{"content": "import re\n\nimport yaml\n\nfrom pre_commit.util import yaml_load\n\n\ndef _indent(s: str) -> str:\n lines = s.splitlines(True)\n return ''.join(' ' * 4 + line if line.strip() else line for line in lines)\n\n\ndef _is_header_line(line: str) -> bool:\n 
return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n if isinstance(yaml_load(contents), list):\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{_indent(rest)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]} | 1,804 | 148 |
gh_patches_debug_60346 | rasdani/github-patches | git_diff | graspologic-org__graspologic-366 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
returning test statistic in LDT
some practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:
this brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?
and on a larger scale: should we really have this API? should fit predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthremore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3 from setuptools import setup, find_packages
4 from sys import platform
5
6 PACKAGE_NAME = "graspy"
7 DESCRIPTION = "A set of python modules for graph statistics"
8 with open("README.md", "r") as f:
9 LONG_DESCRIPTION = f.read()
10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/neurodata/graspy"
13 MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5
14 REQUIRED_PACKAGES = [
15 "networkx>=2.1",
16 "numpy>=1.8.1",
17 "scikit-learn>=0.19.1",
18 "scipy>=1.1.0",
19 "seaborn>=0.9.0",
20 "matplotlib>=3.0.0",
21 "hyppo>=0.1.2",
22 ]
23
24
25 # Find GraSPy version.
26 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
27 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
28 if line.startswith("__version__ = "):
29 VERSION = line.strip().split()[2][1:-1]
30
31
32 def check_python_version():
33 """Exit when the Python version is too low."""
34 if sys.version_info < MINIMUM_PYTHON_VERSION:
35 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
36
37
38 check_python_version()
39
40 setup(
41 name=PACKAGE_NAME,
42 version=VERSION,
43 description=DESCRIPTION,
44 long_description=LONG_DESCRIPTION,
45 long_description_content_type="text/markdown",
46 author=AUTHOR,
47 author_email=AUTHOR_EMAIL,
48 install_requires=REQUIRED_PACKAGES,
49 url=URL,
50 license="Apache License 2.0",
51 classifiers=[
52 "Development Status :: 3 - Alpha",
53 "Intended Audience :: Science/Research",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "License :: OSI Approved :: Apache Software License",
56 "Programming Language :: Python :: 3",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 )
63
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
"scipy>=1.1.0",
"seaborn>=0.9.0",
"matplotlib>=3.0.0",
- "hyppo>=0.1.2",
+ "hyppo>=0.1.3",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n- \"hyppo>=0.1.2\",\n+ \"hyppo>=0.1.3\",\n ]\n", "issue": "returning test statistic in LDT\nsome practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:\r\n\r\nthis brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?\r\n\r\nand on a larger scale: should we really have this API? should fit predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthremore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.2\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,377 | 91 |
gh_patches_debug_17961 | rasdani/github-patches | git_diff | pytorch__tnt-101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AverageValueMeter returns incorrect results when `tensor` is passed
Based on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.
When the `tensor` value is passed by `.item()` the result is correct.
A simple fix would be to add this condition to `add`:
```python
def add(self, value, n=1):
if isinstance(value, torch.Tensor):
value = value.item()
self.val = value
```
I can submit a PR, if that makes sense to you.
</issue>
<code>
[start of torchnet/meter/averagevaluemeter.py]
1 import math
2 from . import meter
3 import numpy as np
4
5
6 class AverageValueMeter(meter.Meter):
7 def __init__(self):
8 super(AverageValueMeter, self).__init__()
9 self.reset()
10 self.val = 0
11
12 def add(self, value, n=1):
13 self.val = value
14 self.sum += value
15 self.var += value * value
16 self.n += n
17
18 if self.n == 0:
19 self.mean, self.std = np.nan, np.nan
20 elif self.n == 1:
21 self.mean, self.std = self.sum, np.inf
22 self.mean_old = self.mean
23 self.m_s = 0.0
24 else:
25 self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
26 self.m_s += (value - self.mean_old) * (value - self.mean)
27 self.mean_old = self.mean
28 self.std = math.sqrt(self.m_s / (self.n - 1.0))
29
30 def value(self):
31 return self.mean, self.std
32
33 def reset(self):
34 self.n = 0
35 self.sum = 0.0
36 self.var = 0.0
37 self.val = 0.0
38 self.mean = np.nan
39 self.mean_old = 0.0
40 self.m_s = 0.0
41 self.std = np.nan
42
[end of torchnet/meter/averagevaluemeter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py
--- a/torchnet/meter/averagevaluemeter.py
+++ b/torchnet/meter/averagevaluemeter.py
@@ -18,14 +18,15 @@
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
- self.mean, self.std = self.sum, np.inf
+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy
+ self.std = np.inf
self.mean_old = self.mean
self.m_s = 0.0
else:
self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
self.m_s += (value - self.mean_old) * (value - self.mean)
self.mean_old = self.mean
- self.std = math.sqrt(self.m_s / (self.n - 1.0))
+ self.std = np.sqrt(self.m_s / (self.n - 1.0))
def value(self):
return self.mean, self.std
| {"golden_diff": "diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py\n--- a/torchnet/meter/averagevaluemeter.py\n+++ b/torchnet/meter/averagevaluemeter.py\n@@ -18,14 +18,15 @@\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n- self.mean, self.std = self.sum, np.inf\n+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy\n+ self.std = np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n- self.std = math.sqrt(self.m_s / (self.n - 1.0))\n+ self.std = np.sqrt(self.m_s / (self.n - 1.0))\n \n def value(self):\n return self.mean, self.std\n", "issue": "AverageValueMeter returns incorrect results when `tensor` is passed\nBased on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.\r\nWhen the `tensor` value is passed by `.item()` the result is correct.\r\nA simple fix would be to add this condition to `add`:\r\n```python\r\ndef add(self, value, n=1):\r\n if isinstance(value, torch.Tensor):\r\n value = value.item()\r\n self.val = value\r\n```\r\n\r\nI can submit a PR, if that makes sense to you.\n", "before_files": [{"content": "import math\nfrom . import meter\nimport numpy as np\n\n\nclass AverageValueMeter(meter.Meter):\n def __init__(self):\n super(AverageValueMeter, self).__init__()\n self.reset()\n self.val = 0\n\n def add(self, value, n=1):\n self.val = value\n self.sum += value\n self.var += value * value\n self.n += n\n\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n self.mean, self.std = self.sum, np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n self.std = math.sqrt(self.m_s / (self.n - 1.0))\n\n def value(self):\n return self.mean, self.std\n\n def reset(self):\n self.n = 0\n self.sum = 0.0\n self.var = 0.0\n self.val = 0.0\n self.mean = np.nan\n self.mean_old = 0.0\n self.m_s = 0.0\n self.std = np.nan\n", "path": "torchnet/meter/averagevaluemeter.py"}]} | 1,079 | 275 |
gh_patches_debug_24187 | rasdani/github-patches | git_diff | vega__altair-1539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support built-in vega themes
See [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:
```python
alt.themes.enable('vega.themes.dark')
```
We'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.
</issue>
<code>
[start of altair/vegalite/v3/theme.py]
1 """Tools for enabling and registering chart themes"""
2
3 from ...utils.theme import ThemeRegistry
4
5 # The entry point group that can be used by other packages to declare other
6 # renderers that will be auto-detected. Explicit registration is also
7 # allowed by the PluginRegistery API.
8 ENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str
9 themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
10
11 themes.register('default', lambda: {"config": {"view": {"width": 400, "height": 300},
12 "mark": {"tooltip": None}}})
13 themes.register('opaque', lambda: {"config": {"background": "white",
14 "view": {"width": 400, "height": 300},
15 "mark": {"tooltip": None}}})
16 themes.register('none', lambda: {})
17 themes.enable('default')
18
[end of altair/vegalite/v3/theme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py
--- a/altair/vegalite/v3/theme.py
+++ b/altair/vegalite/v3/theme.py
@@ -2,6 +2,23 @@
from ...utils.theme import ThemeRegistry
+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']
+
+
+class VegaTheme(object):
+ """Implementation of a builtin vega theme."""
+ def __init__(self, theme):
+ self.theme = theme
+
+ def __call__(self):
+ return {"usermeta": {"embedOptions": {"theme": self.theme}},
+ "config": {"view": {"width": 400, "height": 300},
+ "mark": {"tooltip": None}}}
+
+ def __repr__(self):
+ return "VegaTheme({!r})".format(self.theme)
+
+
# The entry point group that can be used by other packages to declare other
# renderers that will be auto-detected. Explicit registration is also
# allowed by the PluginRegistery API.
@@ -14,4 +31,8 @@
"view": {"width": 400, "height": 300},
"mark": {"tooltip": None}}})
themes.register('none', lambda: {})
+
+for theme in VEGA_THEMES:
+ themes.register(theme, VegaTheme(theme))
+
themes.enable('default')
| {"golden_diff": "diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py\n--- a/altair/vegalite/v3/theme.py\n+++ b/altair/vegalite/v3/theme.py\n@@ -2,6 +2,23 @@\n \n from ...utils.theme import ThemeRegistry\n \n+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']\n+\n+\n+class VegaTheme(object):\n+ \"\"\"Implementation of a builtin vega theme.\"\"\"\n+ def __init__(self, theme):\n+ self.theme = theme\n+ \n+ def __call__(self):\n+ return {\"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n+ \"config\": {\"view\": {\"width\": 400, \"height\": 300},\n+ \"mark\": {\"tooltip\": None}}}\n+\n+ def __repr__(self):\n+ return \"VegaTheme({!r})\".format(self.theme)\n+\n+\n # The entry point group that can be used by other packages to declare other\n # renderers that will be auto-detected. Explicit registration is also\n # allowed by the PluginRegistery API.\n@@ -14,4 +31,8 @@\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\n themes.register('none', lambda: {})\n+ \n+for theme in VEGA_THEMES:\n+ themes.register(theme, VegaTheme(theme))\n+\n themes.enable('default')\n", "issue": "Support built-in vega themes\nSee [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:\r\n```python\r\nalt.themes.enable('vega.themes.dark')\r\n```\r\nWe'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.\n", "before_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register('default', lambda: {\"config\": {\"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('opaque', lambda: {\"config\": {\"background\": \"white\",\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('none', lambda: {})\nthemes.enable('default')\n", "path": "altair/vegalite/v3/theme.py"}]} | 856 | 352 |
gh_patches_debug_22033 | rasdani/github-patches | git_diff | searx__searx-1689 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google Images & DeviantArt don't work anymore
From one day to another, Google Images and DeviantArt stopped to show me anything, even with simple searches.


They are of course activated in the engines. It has been a few days, with a restart every day (local instance using Docker), without modifying engines nor any other setting. Using searx 0.15.0
</issue>
<code>
[start of searx/engines/google_images.py]
1 """
2 Google (Images)
3
4 @website https://www.google.com
5 @provide-api yes (https://developers.google.com/custom-search/)
6
7 @using-api no
8 @results HTML chunks with JSON inside
9 @stable no
10 @parse url, title, img_src
11 """
12
13 from datetime import date, timedelta
14 from json import loads
15 from lxml import html
16 from searx.url_utils import urlencode
17
18 # engine dependent config
19 categories = ['images']
20 paging = True
21 safesearch = True
22 time_range_support = True
23 number_of_results = 100
24
25 search_url = 'https://www.google.com/search'\
26 '?{query}'\
27 '&tbm=isch'\
28 '&yv=2'\
29 '&{search_options}'
30 time_range_attr = "qdr:{range}"
31 time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
32 time_range_dict = {'day': 'd',
33 'week': 'w',
34 'month': 'm'}
35
36
37 # do search-request
38 def request(query, params):
39 search_options = {
40 'ijn': params['pageno'] - 1,
41 'start': (params['pageno'] - 1) * number_of_results
42 }
43
44 if params['time_range'] in time_range_dict:
45 search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
46 elif params['time_range'] == 'year':
47 now = date.today()
48 then = now - timedelta(days=365)
49 start = then.strftime('%m/%d/%Y')
50 end = now.strftime('%m/%d/%Y')
51 search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
52
53 if safesearch and params['safesearch']:
54 search_options['safe'] = 'on'
55
56 params['url'] = search_url.format(query=urlencode({'q': query}),
57 search_options=urlencode(search_options))
58
59 return params
60
61
62 # get response from search-request
63 def response(resp):
64 results = []
65
66 dom = html.fromstring(resp.text)
67
68 # parse results
69 for result in dom.xpath('//div[contains(@class, "rg_meta")]/text()'):
70
71 try:
72 metadata = loads(result)
73 img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
74 source = "{0} ({1})".format(metadata['st'], metadata['isu'])
75 results.append({'url': metadata['ru'],
76 'title': metadata['pt'],
77 'content': metadata['s'],
78 'source': source,
79 'img_format': img_format,
80 'thumbnail_src': metadata['tu'],
81 'img_src': metadata['ou'],
82 'template': 'images.html'})
83
84 except:
85 continue
86
87 return results
88
[end of searx/engines/google_images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -70,11 +70,21 @@
try:
metadata = loads(result)
- img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
- source = "{0} ({1})".format(metadata['st'], metadata['isu'])
+
+ img_format = metadata.get('ity', '')
+ img_width = metadata.get('ow', '')
+ img_height = metadata.get('oh', '')
+ if img_width and img_height:
+ img_format += " {0}x{1}".format(img_width, img_height)
+
+ source = metadata.get('st', '')
+ source_url = metadata.get('isu', '')
+ if source_url:
+ source += " ({0})".format(source_url)
+
results.append({'url': metadata['ru'],
'title': metadata['pt'],
- 'content': metadata['s'],
+ 'content': metadata.get('s', ''),
'source': source,
'img_format': img_format,
'thumbnail_src': metadata['tu'],
| {"golden_diff": "diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py\n--- a/searx/engines/google_images.py\n+++ b/searx/engines/google_images.py\n@@ -70,11 +70,21 @@\n \n try:\n metadata = loads(result)\n- img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n- source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n+\n+ img_format = metadata.get('ity', '')\n+ img_width = metadata.get('ow', '')\n+ img_height = metadata.get('oh', '')\n+ if img_width and img_height:\n+ img_format += \" {0}x{1}\".format(img_width, img_height)\n+\n+ source = metadata.get('st', '')\n+ source_url = metadata.get('isu', '')\n+ if source_url:\n+ source += \" ({0})\".format(source_url)\n+\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n- 'content': metadata['s'],\n+ 'content': metadata.get('s', ''),\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n", "issue": "Google Images & DeviantArt don't work anymore\nFrom one day to another, Google Images and DeviantArt stopped to show me anything, even with simple searches.\r\n\r\n\r\nThey are of course activated in the engines. It has been a few days, with a restart every day (local instance using Docker), without modifying engines nor any other setting. Using searx 0.15.0\n", "before_files": [{"content": "\"\"\"\n Google (Images)\n\n @website https://www.google.com\n @provide-api yes (https://developers.google.com/custom-search/)\n\n @using-api no\n @results HTML chunks with JSON inside\n @stable no\n @parse url, title, img_src\n\"\"\"\n\nfrom datetime import date, timedelta\nfrom json import loads\nfrom lxml import html\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['images']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 100\n\nsearch_url = 'https://www.google.com/search'\\\n '?{query}'\\\n '&tbm=isch'\\\n '&yv=2'\\\n '&{search_options}'\ntime_range_attr = \"qdr:{range}\"\ntime_range_custom_attr = \"cdr:1,cd_min:{start},cd_max{end}\"\ntime_range_dict = {'day': 'd',\n 'week': 'w',\n 'month': 'm'}\n\n\n# do search-request\ndef request(query, params):\n search_options = {\n 'ijn': params['pageno'] - 1,\n 'start': (params['pageno'] - 1) * number_of_results\n }\n\n if params['time_range'] in time_range_dict:\n search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])\n elif params['time_range'] == 'year':\n now = date.today()\n then = now - timedelta(days=365)\n start = then.strftime('%m/%d/%Y')\n end = now.strftime('%m/%d/%Y')\n search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)\n\n if safesearch and params['safesearch']:\n search_options['safe'] = 'on'\n\n params['url'] = search_url.format(query=urlencode({'q': query}),\n search_options=urlencode(search_options))\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath('//div[contains(@class, \"rg_meta\")]/text()'):\n\n try:\n metadata = loads(result)\n img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n 'content': metadata['s'],\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n 'img_src': metadata['ou'],\n 'template': 'images.html'})\n\n 
except:\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}]} | 1,549 | 289 |
gh_patches_debug_11817 | rasdani/github-patches | git_diff | pytorch__pytorch-3139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sparse SGD + momentum = cuda memory issue.
When using classic SGD optimizer with momentum with sparse embeddings the memory keeps garbage collecting / allocating leading to slow down and out of memory error eventually. [Here is a minimal exemple to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)


The issue dissapears when momentum is not used

or when embeddings are not sparse

I'm using the last pytorch version on conda: `'0.2.0_4'`
</issue>
<code>
[start of torch/optim/sgd.py]
1 from .optimizer import Optimizer, required
2
3
4 class SGD(Optimizer):
5 r"""Implements stochastic gradient descent (optionally with momentum).
6
7 Nesterov momentum is based on the formula from
8 `On the importance of initialization and momentum in deep learning`__.
9
10 Args:
11 params (iterable): iterable of parameters to optimize or dicts defining
12 parameter groups
13 lr (float): learning rate
14 momentum (float, optional): momentum factor (default: 0)
15 weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
16 dampening (float, optional): dampening for momentum (default: 0)
17 nesterov (bool, optional): enables Nesterov momentum (default: False)
18
19 Example:
20 >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
21 >>> optimizer.zero_grad()
22 >>> loss_fn(model(input), target).backward()
23 >>> optimizer.step()
24
25 __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
26
27 .. note::
28 The implementation of SGD with Momentum/Nesterov subtly differs from
29 Sutskever et. al. and implementations in some other frameworks.
30
31 Considering the specific case of Momentum, the update can be written as
32
33 .. math::
34 v = \rho * v + g \\
35 p = p - lr * v
36
37 where p, g, v and :math:`\rho` denote the parameters, gradient,
38 velocity, and momentum respectively.
39
40 This is in contrast to Sutskever et. al. and
41 other frameworks which employ an update of the form
42
43 .. math::
44 v = \rho * v + lr * g \\
45 p = p - v
46
47 The Nesterov version is analogously modified.
48 """
49
50 def __init__(self, params, lr=required, momentum=0, dampening=0,
51 weight_decay=0, nesterov=False):
52 defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
53 weight_decay=weight_decay, nesterov=nesterov)
54 if nesterov and (momentum <= 0 or dampening != 0):
55 raise ValueError("Nesterov momentum requires a momentum and zero dampening")
56 super(SGD, self).__init__(params, defaults)
57
58 def __setstate__(self, state):
59 super(SGD, self).__setstate__(state)
60 for group in self.param_groups:
61 group.setdefault('nesterov', False)
62
63 def step(self, closure=None):
64 """Performs a single optimization step.
65
66 Arguments:
67 closure (callable, optional): A closure that reevaluates the model
68 and returns the loss.
69 """
70 loss = None
71 if closure is not None:
72 loss = closure()
73
74 for group in self.param_groups:
75 weight_decay = group['weight_decay']
76 momentum = group['momentum']
77 dampening = group['dampening']
78 nesterov = group['nesterov']
79
80 for p in group['params']:
81 if p.grad is None:
82 continue
83 d_p = p.grad.data
84 if weight_decay != 0:
85 d_p.add_(weight_decay, p.data)
86 if momentum != 0:
87 param_state = self.state[p]
88 if 'momentum_buffer' not in param_state:
89 buf = param_state['momentum_buffer'] = d_p.clone()
90 else:
91 buf = param_state['momentum_buffer']
92 buf.mul_(momentum).add_(1 - dampening, d_p)
93 if nesterov:
94 d_p = d_p.add(momentum, buf)
95 else:
96 d_p = buf
97
98 p.data.add_(-group['lr'], d_p)
99
100 return loss
101
[end of torch/optim/sgd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -86,7 +86,8 @@
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
- buf = param_state['momentum_buffer'] = d_p.clone()
+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()
+ buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
| {"golden_diff": "diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py\n--- a/torch/optim/sgd.py\n+++ b/torch/optim/sgd.py\n@@ -86,7 +86,8 @@\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n- buf = param_state['momentum_buffer'] = d_p.clone()\n+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()\n+ buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n", "issue": "Sparse SGD + momentum = cuda memory issue.\nWhen using classic SGD optimizer with momentum with sparse embeddings the memory keeps garbage collecting / allocating leading to slow down and out of memory error eventually. [Here is a minimal exemple to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)\r\n\r\n\r\n\r\n\r\n\r\nThe issue dissapears when momentum is not used\r\n\r\n\r\nor when embeddings are not sparse\r\n\r\n\r\n\r\nI'm using the last pytorch version on conda: `'0.2.0_4'`\r\n\n", "before_files": [{"content": "from .optimizer import Optimizer, required\n\n\nclass SGD(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n v = \\rho * v + g \\\\\n p = p - lr * v\n\n where p, g, v and :math:`\\rho` denote the parameters, gradient,\n velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n v = \\rho * v + lr * g \\\\\n p = p - v\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = d_p.clone()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n p.data.add_(-group['lr'], d_p)\n\n return loss\n", "path": "torch/optim/sgd.py"}]} | 1,948 | 167 |
gh_patches_debug_30442 | rasdani/github-patches | git_diff | privacyidea__privacyidea-3324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the logging of SSH tokens
If a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key, that is not used:
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84
We should change this to "debug".
On the other hand we should add a log here
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73
like
~~~~python
log.info(u"Using SSH key {0!s} for user {1!s}".format(tokclass.token.serial, options.get("user")))
~~~~
</issue>
<code>
[start of privacyidea/lib/applications/ssh.py]
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Jul 18, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # This code is free software; you can redistribute it and/or
9 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
10 # License as published by the Free Software Foundation; either
11 # version 3 of the License, or any later version.
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public
19 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 """
22 This file is tested in tests/test_lib_machinetokens.py
23 """
24 from privacyidea.lib.applications import MachineApplicationBase
25 import logging
26 from privacyidea.lib.token import get_tokens
27 log = logging.getLogger(__name__)
28
29
30 class MachineApplication(MachineApplicationBase):
31 """
32 This is the application for SSH.
33
34 Possible options:
35 user
36
37 """
38 application_name = "ssh"
39 '''as the authentication item is no sensitive information,
40 we can set bulk_call to True. Thus the admin can call
41 all public keys to distribute them via salt.
42 FIXME: This is only true for SSH pub keys.
43 If we would support OTP with SSH, this might be sensitive information!
44 '''
45 allow_bulk_call = True
46
47 @staticmethod
48 def get_authentication_item(token_type,
49 serial,
50 challenge=None, options=None,
51 filter_param=None):
52 """
53 :param token_type: the type of the token. At the moment
54 we support the tokenype "sshkey"
55 :param serial: the serial number of the token.
56 :return auth_item: Return the SSH pub keys.
57 """
58 options = options or {}
59 ret = {}
60 filter_param = filter_param or {}
61 user_filter = filter_param.get("user")
62 if token_type.lower() == "sshkey":
63 toks = get_tokens(serial=serial, active=True)
64 if len(toks) == 1:
65 # We return this entry, either if no user_filter is requested
66 # or if the user_filter matches the user
67 if (user_filter and user_filter == options.get("user")) or \
68 not user_filter:
69 # tokenclass is a SSHkeyTokenClass
70 tokclass = toks[0]
71 # We just return the ssh public key, so that
72 # it can be included into authorized keys.
73 ret["sshkey"] = tokclass.get_sshkey()
74 # We return the username if the token is assigned to a
75 # user, so that this username could be used to save
76 # the ssh key accordingly
77 user_object = toks[0].user
78 if user_object:
79 uInfo = user_object.info
80 if "username" in uInfo:
81 ret["username"] = uInfo.get("username")
82 # ret["info"] = uInfo
83 else:
84 log.info("The requested user %s does not match the user "
85 "option (%s) of the SSH application." % (
86 user_filter, options.get("user")))
87 else:
88 log.info("Token %r, type %r is not supported by "
89 "SSH application module" % (serial, token_type))
90
91 return ret
92
93 @staticmethod
94 def get_options():
95 """
96 returns a dictionary with a list of required and optional options
97 """
98 return {'required': [],
99 'optional': ['user']}
100
[end of privacyidea/lib/applications/ssh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py
--- a/privacyidea/lib/applications/ssh.py
+++ b/privacyidea/lib/applications/ssh.py
@@ -70,6 +70,8 @@
tokclass = toks[0]
# We just return the ssh public key, so that
# it can be included into authorized keys.
+ log.info(u"Using SSH key {0!s} for SSH user {1!s}".format(tokclass.token.serial,
+ options.get("user")))
ret["sshkey"] = tokclass.get_sshkey()
# We return the username if the token is assigned to a
# user, so that this username could be used to save
@@ -79,14 +81,13 @@
uInfo = user_object.info
if "username" in uInfo:
ret["username"] = uInfo.get("username")
- # ret["info"] = uInfo
else:
- log.info("The requested user %s does not match the user "
- "option (%s) of the SSH application." % (
+ log.debug(u"The requested user {0!s} does not match the user "
+ "option ({0!s}) of the SSH application.".format(
user_filter, options.get("user")))
else:
- log.info("Token %r, type %r is not supported by "
- "SSH application module" % (serial, token_type))
+ log.info(u"Token {0!r}, type {0!r} is not supported by "
+ "SSH application module".format(serial, token_type))
return ret
| {"golden_diff": "diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py\n--- a/privacyidea/lib/applications/ssh.py\n+++ b/privacyidea/lib/applications/ssh.py\n@@ -70,6 +70,8 @@\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n+ log.info(u\"Using SSH key {0!s} for SSH user {1!s}\".format(tokclass.token.serial,\n+ options.get(\"user\")))\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n@@ -79,14 +81,13 @@\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n- # ret[\"info\"] = uInfo\n else:\n- log.info(\"The requested user %s does not match the user \"\n- \"option (%s) of the SSH application.\" % (\n+ log.debug(u\"The requested user {0!s} does not match the user \"\n+ \"option ({0!s}) of the SSH application.\".format(\n user_filter, options.get(\"user\")))\n else:\n- log.info(\"Token %r, type %r is not supported by \"\n- \"SSH application module\" % (serial, token_type))\n+ log.info(u\"Token {0!r}, type {0!r} is not supported by \"\n+ \"SSH application module\".format(serial, token_type))\n \n return ret\n", "issue": "Improve the logging of SSH tokens\nIf a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key, that is not used:\r\n\r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84\r\n\r\nWe should change this to \"debug\".\r\n\r\nOn the other hand we should add a log here \r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73\r\nlike\r\n\r\n~~~~python\r\nlog.info(u\"Using SSH key {0!s} for user {1!s}\".format(tokclass.token.serial, options.get(\"user\")))\r\n~~~~\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Jul 18, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file is tested in tests/test_lib_machinetokens.py\n\"\"\"\nfrom privacyidea.lib.applications import MachineApplicationBase\nimport logging\nfrom privacyidea.lib.token import get_tokens\nlog = logging.getLogger(__name__)\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for SSH.\n\n Possible options:\n user\n\n \"\"\"\n application_name = \"ssh\"\n '''as the authentication item is no sensitive information,\n we can set bulk_call to True. 
Thus the admin can call\n all public keys to distribute them via salt.\n FIXME: This is only true for SSH pub keys.\n If we would support OTP with SSH, this might be sensitive information!\n '''\n allow_bulk_call = True\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we support the tokenype \"sshkey\"\n :param serial: the serial number of the token.\n :return auth_item: Return the SSH pub keys.\n \"\"\"\n options = options or {}\n ret = {}\n filter_param = filter_param or {}\n user_filter = filter_param.get(\"user\")\n if token_type.lower() == \"sshkey\":\n toks = get_tokens(serial=serial, active=True)\n if len(toks) == 1:\n # We return this entry, either if no user_filter is requested\n # or if the user_filter matches the user\n if (user_filter and user_filter == options.get(\"user\")) or \\\n not user_filter:\n # tokenclass is a SSHkeyTokenClass\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n # the ssh key accordingly\n user_object = toks[0].user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n # ret[\"info\"] = uInfo\n else:\n log.info(\"The requested user %s does not match the user \"\n \"option (%s) of the SSH application.\" % (\n user_filter, options.get(\"user\")))\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"SSH application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'required': [],\n 'optional': ['user']}\n", "path": "privacyidea/lib/applications/ssh.py"}]} | 1,778 | 375 |
gh_patches_debug_20826 | rasdani/github-patches | git_diff | dask__dask-1231 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tests to package
In `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from os.path import exists
4 from setuptools import setup
5 import dask
6
7 extras_require = {
8 'array': ['numpy', 'toolz >= 0.7.2'],
9 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],
10 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',
11 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],
12 'distributed': ['distributed >= 1.9'],
13 'imperative': ['toolz >= 0.7.2'],
14 }
15 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
16
17 setup(name='dask',
18 version=dask.__version__,
19 description='Minimal task scheduling abstraction',
20 url='http://github.com/dask/dask/',
21 maintainer='Matthew Rocklin',
22 maintainer_email='[email protected]',
23 license='BSD',
24 keywords='task-scheduling parallelism',
25 packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
26 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
27 long_description=(open('README.rst').read() if exists('README.rst')
28 else ''),
29 extras_require=extras_require,
30 zip_safe=False)
31
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,12 @@
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']
+
+tests = [p + '.tests' for p in packages]
+
+
setup(name='dask',
version=dask.__version__,
description='Minimal task scheduling abstraction',
@@ -22,8 +28,7 @@
maintainer_email='[email protected]',
license='BSD',
keywords='task-scheduling parallelism',
- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
+ packages=packages + tests,
long_description=(open('README.rst').read() if exists('README.rst')
else ''),
extras_require=extras_require,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,12 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']\n+\n+tests = [p + '.tests' for p in packages]\n+\n+\n setup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n@@ -22,8 +28,7 @@\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n+ packages=packages + tests,\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n", "issue": "Add tests to package\nIn `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os.path import exists\nfrom setuptools import setup\nimport dask\n\nextras_require = {\n 'array': ['numpy', 'toolz >= 0.7.2'],\n 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],\n 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',\n 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],\n 'distributed': ['distributed >= 1.9'],\n 'imperative': ['toolz >= 0.7.2'],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n url='http://github.com/dask/dask/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n zip_safe=False)\n", "path": "setup.py"}]} | 959 | 264 |
gh_patches_debug_14884 | rasdani/github-patches | git_diff | python-discord__bot-1205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: ` chars are not escaped when parsing !source

When responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.
This _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.
</issue>
<code>
[start of bot/exts/info/source.py]
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 raise commands.BadArgument(
39 f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
40 )
41
42
43 class BotSource(commands.Cog):
44 """Displays information about the bot's source code."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 @commands.command(name="source", aliases=("src",))
50 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
51 """Display information and a GitHub link to the source code of a command, tag, or cog."""
52 if not source_item:
53 embed = Embed(title="Bot's GitHub Repository")
54 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
55 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
56 await ctx.send(embed=embed)
57 return
58
59 embed = await self.build_embed(source_item)
60 await ctx.send(embed=embed)
61
62 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
63 """
64 Build GitHub link of source item, return this link, file location and first line number.
65
66 Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
67 """
68 if isinstance(source_item, commands.Command):
69 src = source_item.callback.__code__
70 filename = src.co_filename
71 elif isinstance(source_item, str):
72 tags_cog = self.bot.get_cog("Tags")
73 filename = tags_cog._cache[source_item]["location"]
74 else:
75 src = type(source_item)
76 try:
77 filename = inspect.getsourcefile(src)
78 except TypeError:
79 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
80
81 if not isinstance(source_item, str):
82 try:
83 lines, first_line_no = inspect.getsourcelines(src)
84 except OSError:
85 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
86
87 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
88 else:
89 first_line_no = None
90 lines_extension = ""
91
92 # Handle tag file location differently than others to avoid errors in some cases
93 if not first_line_no:
94 file_location = Path(filename).relative_to("/bot/")
95 else:
96 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
97
98 url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
99
100 return url, file_location, first_line_no or None
101
102 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
103 """Build embed based on source object."""
104 url, location, first_line = self.get_source_link(source_object)
105
106 if isinstance(source_object, commands.HelpCommand):
107 title = "Help Command"
108 description = source_object.__doc__.splitlines()[1]
109 elif isinstance(source_object, commands.Command):
110 description = source_object.short_doc
111 title = f"Command: {source_object.qualified_name}"
112 elif isinstance(source_object, str):
113 title = f"Tag: {source_object}"
114 description = ""
115 else:
116 title = f"Cog: {source_object.qualified_name}"
117 description = source_object.description.splitlines()[0]
118
119 embed = Embed(title=title, description=description)
120 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
121 line_text = f":{first_line}" if first_line else ""
122 embed.set_footer(text=f"{location}{line_text}")
123
124 return embed
125
126
127 def setup(bot: Bot) -> None:
128 """Load the BotSource cog."""
129 bot.add_cog(BotSource(bot))
130
[end of bot/exts/info/source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Optional, Tuple, Union
-from discord import Embed
+from discord import Embed, utils
from discord.ext import commands
from bot.bot import Bot
@@ -35,8 +35,10 @@
elif argument.lower() in tags_cog._cache:
return argument.lower()
+ escaped_arg = utils.escape_markdown(argument)
+
raise commands.BadArgument(
- f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
+ f"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog."
)
| {"golden_diff": "diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py\n--- a/bot/exts/info/source.py\n+++ b/bot/exts/info/source.py\n@@ -2,7 +2,7 @@\n from pathlib import Path\n from typing import Optional, Tuple, Union\n \n-from discord import Embed\n+from discord import Embed, utils\n from discord.ext import commands\n \n from bot.bot import Bot\n@@ -35,8 +35,10 @@\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n \n+ escaped_arg = utils.escape_markdown(argument)\n+\n raise commands.BadArgument(\n- f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n+ f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n", "issue": "Bug: ` chars are not escaped when parsing !source\n\r\n\r\nWhen responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.\r\n\r\nThis _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. 
via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}]} | 1,932 | 196 |
gh_patches_debug_15470 | rasdani/github-patches | git_diff | tensorflow__addons-567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build nightly from tf-nightly
Currently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 import os
32 import platform
33 import sys
34
35 from datetime import datetime
36 from setuptools import find_packages
37 from setuptools import setup
38 from setuptools.dist import Distribution
39 from setuptools import Extension
40
41 DOCLINES = __doc__.split('\n')
42
43 TFA_NIGHTLY = 'tfa-nightly'
44 TFA_RELEASE = 'tensorflow-addons'
45
46 if '--nightly' in sys.argv:
47 project_name = TFA_NIGHTLY
48 nightly_idx = sys.argv.index('--nightly')
49 sys.argv.pop(nightly_idx)
50 else:
51 project_name = TFA_RELEASE
52
53 # Version
54 version = {}
55 base_dir = os.path.dirname(os.path.abspath(__file__))
56 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
57 # yapf: disable
58 exec(fp.read(), version)
59 # yapf: enable
60
61 if project_name == TFA_NIGHTLY:
62 version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
63
64 # Dependencies
65 REQUIRED_PACKAGES = [
66 'six >= 1.10.0',
67 ]
68
69 if project_name == TFA_RELEASE:
70 # TODO: remove if-else condition when tf supports package consolidation.
71 if platform.system() == 'Linux':
72 REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
73 else:
74 REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
75 elif project_name == TFA_NIGHTLY:
76 # TODO: remove if-else condition when tf-nightly supports package consolidation.
77 if platform.system() == 'Linux':
78 REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
79 else:
80 REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
81
82
83 class BinaryDistribution(Distribution):
84 """This class is needed in order to create OS specific wheels."""
85
86 def has_ext_modules(self):
87 return True
88
89
90 setup(
91 name=project_name,
92 version=version['__version__'],
93 description=DOCLINES[0],
94 long_description='\n'.join(DOCLINES[2:]),
95 author='Google Inc.',
96 author_email='[email protected]',
97 packages=find_packages(),
98 ext_modules=[Extension('_foo', ['stub.cc'])],
99 install_requires=REQUIRED_PACKAGES,
100 include_package_data=True,
101 zip_safe=False,
102 distclass=BinaryDistribution,
103 classifiers=[
104 'Development Status :: 4 - Beta',
105 'Intended Audience :: Developers',
106 'Intended Audience :: Education',
107 'Intended Audience :: Science/Research',
108 'License :: OSI Approved :: Apache Software License',
109 'Programming Language :: Python :: 2.7',
110 'Programming Language :: Python :: 3.5',
111 'Programming Language :: Python :: 3.6',
112 'Programming Language :: Python :: 3.7',
113 'Topic :: Scientific/Engineering :: Mathematics',
114 'Topic :: Software Development :: Libraries :: Python Modules',
115 'Topic :: Software Development :: Libraries',
116 ],
117 license='Apache 2.0',
118 keywords='tensorflow addons machine learning',
119 )
120
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,15 +69,11 @@
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
else:
- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
elif project_name == TFA_NIGHTLY:
- # TODO: remove if-else condition when tf-nightly supports package consolidation.
- if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
- else:
- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
+ REQUIRED_PACKAGES.append('tf-nightly')
class BinaryDistribution(Distribution):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,15 +69,11 @@\n if project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\n elif project_name == TFA_NIGHTLY:\n- # TODO: remove if-else condition when tf-nightly supports package consolidation.\n- if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n- else:\n- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n+ REQUIRED_PACKAGES.append('tf-nightly')\n \n \n class BinaryDistribution(Distribution):\n", "issue": "Build nightly from tf-nightly\nCurrently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\nelif project_name == TFA_NIGHTLY:\n # TODO: remove if-else condition when tf-nightly supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n else:\n REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]} | 1,757 | 238 |
gh_patches_debug_34765 | rasdani/github-patches | git_diff | crytic__slither-1909 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] contract reports ether as locked when ether is sent in Yul
The following contract reports ether as locked despite it being sent in a Yul block
```
contract FPLockedEther {
receive() payable external {}
function yulSendEther() external {
bool success;
assembly {
success := call(gas(), caller(), balance(address()), 0,0,0,0)
}
}
}
```
```
Contract locking ether found:
Contract FPLockedEther (locked-ether.sol#1-13) has payable functions:
- FPLockedEther.receive() (locked-ether.sol#2-3)
But does not have a function to withdraw the ether
Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether
```
It could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL`
```
Contract FPLockedEther
Function FPLockedEther.receive() (*)
Function FPLockedEther.yulSendEther() (*)
Expression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0)
IRs:
TMP_0(uint256) = SOLIDITY_CALL gas()()
TMP_1(address) := msg.sender(address)
TMP_2 = CONVERT this to address
TMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2)
TMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0)
success(bool) := TMP_4(uint256)
```
</issue>
<code>
[start of slither/detectors/attributes/locked_ether.py]
1 """
2 Check if ethers are locked in the contract
3 """
4 from typing import List
5
6 from slither.core.declarations.contract import Contract
7 from slither.detectors.abstract_detector import (
8 AbstractDetector,
9 DetectorClassification,
10 DETECTOR_INFO,
11 )
12 from slither.slithir.operations import (
13 HighLevelCall,
14 LowLevelCall,
15 Send,
16 Transfer,
17 NewContract,
18 LibraryCall,
19 InternalCall,
20 )
21 from slither.utils.output import Output
22
23
24 class LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks
25
26 ARGUMENT = "locked-ether"
27 HELP = "Contracts that lock ether"
28 IMPACT = DetectorClassification.MEDIUM
29 CONFIDENCE = DetectorClassification.HIGH
30
31 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether"
32
33 WIKI_TITLE = "Contracts that lock Ether"
34 WIKI_DESCRIPTION = "Contract with a `payable` function, but without a withdrawal capacity."
35
36 # region wiki_exploit_scenario
37 WIKI_EXPLOIT_SCENARIO = """
38 ```solidity
39 pragma solidity 0.4.24;
40 contract Locked{
41 function receive() payable public{
42 }
43 }
44 ```
45 Every Ether sent to `Locked` will be lost."""
46 # endregion wiki_exploit_scenario
47
48 WIKI_RECOMMENDATION = "Remove the payable attribute or add a withdraw function."
49
50 @staticmethod
51 def do_no_send_ether(contract: Contract) -> bool:
52 functions = contract.all_functions_called
53 to_explore = functions
54 explored = []
55 while to_explore: # pylint: disable=too-many-nested-blocks
56 functions = to_explore
57 explored += to_explore
58 to_explore = []
59 for function in functions:
60 calls = [c.name for c in function.internal_calls]
61 if "suicide(address)" in calls or "selfdestruct(address)" in calls:
62 return False
63 for node in function.nodes:
64 for ir in node.irs:
65 if isinstance(
66 ir,
67 (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),
68 ):
69 if ir.call_value and ir.call_value != 0:
70 return False
71 if isinstance(ir, (LowLevelCall)):
72 if ir.function_name in ["delegatecall", "callcode"]:
73 return False
74 # If a new internal call or librarycall
75 # Add it to the list to explore
76 # InternalCall if to follow internal call in libraries
77 if isinstance(ir, (InternalCall, LibraryCall)):
78 if not ir.function in explored:
79 to_explore.append(ir.function)
80
81 return True
82
83 def _detect(self) -> List[Output]:
84 results = []
85
86 for contract in self.compilation_unit.contracts_derived:
87 if contract.is_signature_only():
88 continue
89 funcs_payable = [function for function in contract.functions if function.payable]
90 if funcs_payable:
91 if self.do_no_send_ether(contract):
92 info: DETECTOR_INFO = ["Contract locking ether found:\n"]
93 info += ["\tContract ", contract, " has payable functions:\n"]
94 for function in funcs_payable:
95 info += ["\t - ", function, "\n"]
96 info += "\tBut does not have a function to withdraw the ether\n"
97
98 json = self.generate_result(info)
99
100 results.append(json)
101
102 return results
103
[end of slither/detectors/attributes/locked_ether.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py
--- a/slither/detectors/attributes/locked_ether.py
+++ b/slither/detectors/attributes/locked_ether.py
@@ -3,7 +3,7 @@
"""
from typing import List
-from slither.core.declarations.contract import Contract
+from slither.core.declarations import Contract, SolidityFunction
from slither.detectors.abstract_detector import (
AbstractDetector,
DetectorClassification,
@@ -17,7 +17,9 @@
NewContract,
LibraryCall,
InternalCall,
+ SolidityCall,
)
+from slither.slithir.variables import Constant
from slither.utils.output import Output
@@ -68,8 +70,28 @@
):
if ir.call_value and ir.call_value != 0:
return False
- if isinstance(ir, (LowLevelCall)):
- if ir.function_name in ["delegatecall", "callcode"]:
+ if isinstance(ir, (LowLevelCall)) and ir.function_name in [
+ "delegatecall",
+ "callcode",
+ ]:
+ return False
+ if isinstance(ir, SolidityCall):
+ call_can_send_ether = ir.function in [
+ SolidityFunction(
+ "delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ SolidityFunction(
+ "callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ SolidityFunction(
+ "call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
+ ),
+ ]
+ nonzero_call_value = call_can_send_ether and (
+ not isinstance(ir.arguments[2], Constant)
+ or ir.arguments[2].value != 0
+ )
+ if nonzero_call_value:
return False
# If a new internal call or librarycall
# Add it to the list to explore
| {"golden_diff": "diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py\n--- a/slither/detectors/attributes/locked_ether.py\n+++ b/slither/detectors/attributes/locked_ether.py\n@@ -3,7 +3,7 @@\n \"\"\"\n from typing import List\n \n-from slither.core.declarations.contract import Contract\n+from slither.core.declarations import Contract, SolidityFunction\n from slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n@@ -17,7 +17,9 @@\n NewContract,\n LibraryCall,\n InternalCall,\n+ SolidityCall,\n )\n+from slither.slithir.variables import Constant\n from slither.utils.output import Output\n \n \n@@ -68,8 +70,28 @@\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n- if isinstance(ir, (LowLevelCall)):\n- if ir.function_name in [\"delegatecall\", \"callcode\"]:\n+ if isinstance(ir, (LowLevelCall)) and ir.function_name in [\n+ \"delegatecall\",\n+ \"callcode\",\n+ ]:\n+ return False\n+ if isinstance(ir, SolidityCall):\n+ call_can_send_ether = ir.function in [\n+ SolidityFunction(\n+ \"delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ ]\n+ nonzero_call_value = call_can_send_ether and (\n+ not isinstance(ir.arguments[2], Constant)\n+ or ir.arguments[2].value != 0\n+ )\n+ if nonzero_call_value:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n", "issue": "[Bug] contract reports ether as locked when ether is sent in Yul\nThe following contract reports ether as locked despite it being sent in a Yul block\r\n```\r\ncontract FPLockedEther {\r\n receive() payable external {}\r\n\r\n function yulSendEther() external {\r\n bool success;\r\n assembly {\r\n success := call(gas(), caller(), balance(address()), 0,0,0,0)\r\n }\r\n }\r\n}\r\n```\r\n```\r\nContract locking ether found:\r\n\tContract FPLockedEther (locked-ether.sol#1-13) has payable functions:\r\n\t - FPLockedEther.receive() (locked-ether.sol#2-3)\r\n\tBut does not have a function to withdraw the ether\r\nReference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\r\n```\r\n\r\nIt could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL`\r\n```\r\nContract FPLockedEther\r\n\tFunction FPLockedEther.receive() (*)\r\n\tFunction FPLockedEther.yulSendEther() (*)\r\n\t\tExpression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0)\r\n\t\tIRs:\r\n\t\t\tTMP_0(uint256) = SOLIDITY_CALL gas()()\r\n\t\t\tTMP_1(address) := msg.sender(address)\r\n\t\t\tTMP_2 = CONVERT this to address\r\n\t\t\tTMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2)\r\n\t\t\tTMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0)\r\n\t\t\tsuccess(bool) := TMP_4(uint256)\r\n```\n", "before_files": [{"content": "\"\"\"\n Check if ethers are locked in the contract\n\"\"\"\nfrom typing import List\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import (\n HighLevelCall,\n LowLevelCall,\n Send,\n Transfer,\n NewContract,\n LibraryCall,\n InternalCall,\n)\nfrom 
slither.utils.output import Output\n\n\nclass LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks\n\n ARGUMENT = \"locked-ether\"\n HELP = \"Contracts that lock ether\"\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\"\n\n WIKI_TITLE = \"Contracts that lock Ether\"\n WIKI_DESCRIPTION = \"Contract with a `payable` function, but without a withdrawal capacity.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\npragma solidity 0.4.24;\ncontract Locked{\n function receive() payable public{\n }\n}\n```\nEvery Ether sent to `Locked` will be lost.\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Remove the payable attribute or add a withdraw function.\"\n\n @staticmethod\n def do_no_send_ether(contract: Contract) -> bool:\n functions = contract.all_functions_called\n to_explore = functions\n explored = []\n while to_explore: # pylint: disable=too-many-nested-blocks\n functions = to_explore\n explored += to_explore\n to_explore = []\n for function in functions:\n calls = [c.name for c in function.internal_calls]\n if \"suicide(address)\" in calls or \"selfdestruct(address)\" in calls:\n return False\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(\n ir,\n (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n if isinstance(ir, (LowLevelCall)):\n if ir.function_name in [\"delegatecall\", \"callcode\"]:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n # InternalCall if to follow internal call in libraries\n if isinstance(ir, (InternalCall, LibraryCall)):\n if not ir.function in explored:\n to_explore.append(ir.function)\n\n return True\n\n def _detect(self) -> List[Output]:\n results = []\n\n for contract in self.compilation_unit.contracts_derived:\n if contract.is_signature_only():\n continue\n funcs_payable = [function for function in contract.functions if function.payable]\n if funcs_payable:\n if self.do_no_send_ether(contract):\n info: DETECTOR_INFO = [\"Contract locking ether found:\\n\"]\n info += [\"\\tContract \", contract, \" has payable functions:\\n\"]\n for function in funcs_payable:\n info += [\"\\t - \", function, \"\\n\"]\n info += \"\\tBut does not have a function to withdraw the ether\\n\"\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n", "path": "slither/detectors/attributes/locked_ether.py"}]} | 1,927 | 486 |
gh_patches_debug_22330 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1744 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown preview fails CSRF validation checks
Caused by the name change of the CSRF cookie.
</issue>
<code>
[start of app/grandchallenge/core/widgets.py]
1 from django import forms
2 from markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget
3
4
5 class JSONEditorWidget(forms.Textarea):
6 template_name = "jsoneditor/jsoneditor_widget.html"
7
8 def __init__(self, schema=None, attrs=None):
9 super().__init__(attrs)
10 self.schema = schema
11
12 def get_context(self, name, value, attrs):
13 context = super().get_context(name, value, attrs)
14 context.update({"schema": self.schema})
15 return context
16
17 class Media:
18 css = {
19 "all": (
20 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css",
21 )
22 }
23 js = (
24 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js",
25 )
26
27
28 class MarkdownEditorWidget(MarkdownxWidget):
29 class Media(MarkdownxWidget.Media):
30 js = [
31 *MarkdownxWidget.Media.js,
32 "vendor/js/markdown-toolbar-element/index.umd.js",
33 ]
34
35
36 class MarkdownEditorAdminWidget(AdminMarkdownxWidget):
37 class Media(AdminMarkdownxWidget.Media):
38 css = {
39 "all": [
40 *AdminMarkdownxWidget.Media.css["all"],
41 "vendor/css/base.min.css",
42 "vendor/fa/css/all.css",
43 ]
44 }
45 js = [
46 *AdminMarkdownxWidget.Media.js,
47 "vendor/js/markdown-toolbar-element/index.umd.js",
48 ]
49
[end of app/grandchallenge/core/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py
--- a/app/grandchallenge/core/widgets.py
+++ b/app/grandchallenge/core/widgets.py
@@ -26,23 +26,29 @@
class MarkdownEditorWidget(MarkdownxWidget):
- class Media(MarkdownxWidget.Media):
- js = [
- *MarkdownxWidget.Media.js,
- "vendor/js/markdown-toolbar-element/index.umd.js",
- ]
+ @property
+ def media(self):
+ return forms.Media(
+ js=(
+ "js/markdownx.js",
+ "vendor/js/markdown-toolbar-element/index.umd.js",
+ )
+ )
class MarkdownEditorAdminWidget(AdminMarkdownxWidget):
- class Media(AdminMarkdownxWidget.Media):
- css = {
- "all": [
- *AdminMarkdownxWidget.Media.css["all"],
- "vendor/css/base.min.css",
- "vendor/fa/css/all.css",
- ]
- }
- js = [
- *AdminMarkdownxWidget.Media.js,
- "vendor/js/markdown-toolbar-element/index.umd.js",
- ]
+ @property
+ def media(self):
+ return forms.Media(
+ css={
+ "all": [
+ *AdminMarkdownxWidget.Media.css["all"],
+ "vendor/css/base.min.css",
+ "vendor/fa/css/all.css",
+ ]
+ },
+ js=[
+ "js/markdownx.js",
+ "vendor/js/markdown-toolbar-element/index.umd.js",
+ ],
+ )
| {"golden_diff": "diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py\n--- a/app/grandchallenge/core/widgets.py\n+++ b/app/grandchallenge/core/widgets.py\n@@ -26,23 +26,29 @@\n \n \n class MarkdownEditorWidget(MarkdownxWidget):\n- class Media(MarkdownxWidget.Media):\n- js = [\n- *MarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ js=(\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ )\n+ )\n \n \n class MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n- class Media(AdminMarkdownxWidget.Media):\n- css = {\n- \"all\": [\n- *AdminMarkdownxWidget.Media.css[\"all\"],\n- \"vendor/css/base.min.css\",\n- \"vendor/fa/css/all.css\",\n- ]\n- }\n- js = [\n- *AdminMarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ css={\n+ \"all\": [\n+ *AdminMarkdownxWidget.Media.css[\"all\"],\n+ \"vendor/css/base.min.css\",\n+ \"vendor/fa/css/all.css\",\n+ ]\n+ },\n+ js=[\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ ],\n+ )\n", "issue": "Markdown preview fails CSRF validation checks\nCaused by the name change of the CSRF cookie.\n", "before_files": [{"content": "from django import forms\nfrom markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget\n\n\nclass JSONEditorWidget(forms.Textarea):\n template_name = \"jsoneditor/jsoneditor_widget.html\"\n\n def __init__(self, schema=None, attrs=None):\n super().__init__(attrs)\n self.schema = schema\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context.update({\"schema\": self.schema})\n return context\n\n class Media:\n css = {\n \"all\": (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css\",\n )\n }\n js = (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js\",\n )\n\n\nclass MarkdownEditorWidget(MarkdownxWidget):\n class Media(MarkdownxWidget.Media):\n js = [\n *MarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n\n\nclass MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n class Media(AdminMarkdownxWidget.Media):\n css = {\n \"all\": [\n *AdminMarkdownxWidget.Media.css[\"all\"],\n \"vendor/css/base.min.css\",\n \"vendor/fa/css/all.css\",\n ]\n }\n js = [\n *AdminMarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n", "path": "app/grandchallenge/core/widgets.py"}]} | 964 | 356 |
gh_patches_debug_7037 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1080 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception raised with sizeof of some spans
With `master`:
```
Exception in thread AgentWriter:
Traceback (most recent call last):
File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py", line 67, in _target
self.run_periodic()
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py", line 65, in flush_queue
traces_queue_size = sum(map(sizeof.sizeof, traces))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 31, in sizeof
return _sizeof(o)
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof
return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 8, in <genexpr>
return (getattr(o, slot) for slot in o.__slots__)
AttributeError: rate_limit
```
This might be an issue where the attribute is declared in `__slots__` but never set.
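A minimal reproduction of what I suspect is happening, plus the sentinel-style guard that would avoid it (sketch only, the class and slot names are made up):

```python
class Span(object):
    __slots__ = ("name", "rate_limit")

    def __init__(self, name):
        self.name = name  # rate_limit is declared in __slots__ but never assigned


_UNSET = object()


def iter_slots(o):
    # Skip slots that were never assigned instead of letting getattr raise.
    return (
        v
        for v in (getattr(o, slot, _UNSET) for slot in o.__slots__)
        if v is not _UNSET
    )


span = Span("web.request")
# getattr(span, "rate_limit") raises AttributeError, as in the traceback above.
print(list(iter_slots(span)))  # ['web.request']
```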
</issue>
<code>
[start of ddtrace/utils/sizeof.py]
1 import collections
2 import sys
3 from itertools import chain
4
5
6 def iter_object(o):
7 if hasattr(o, '__slots__'):
8 return (getattr(o, slot) for slot in o.__slots__)
9 elif hasattr(o, '__dict__'):
10 return list(o.__dict__.items())
11 elif isinstance(o, dict):
12 # Make a copy to avoid corruption
13 return chain.from_iterable(list(o.items()))
14 elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):
15 # Make a copy to avoid corruption
16 return iter(list(o))
17 return []
18
19
20 def sizeof(o):
21 """Returns the approximate memory footprint an object and all of its contents."""
22 seen = set()
23
24 def _sizeof(o):
25 # do not double count the same object
26 if id(o) in seen:
27 return 0
28 seen.add(id(o))
29 return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))
30
31 return _sizeof(o)
32
[end of ddtrace/utils/sizeof.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py
--- a/ddtrace/utils/sizeof.py
+++ b/ddtrace/utils/sizeof.py
@@ -2,10 +2,16 @@
import sys
from itertools import chain
+_UNSET = object()
+
def iter_object(o):
if hasattr(o, '__slots__'):
- return (getattr(o, slot) for slot in o.__slots__)
+ return (
+ s
+ for s in (getattr(o, slot, _UNSET) for slot in o.__slots__)
+ if s != _UNSET
+ )
elif hasattr(o, '__dict__'):
return list(o.__dict__.items())
elif isinstance(o, dict):
| {"golden_diff": "diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py\n--- a/ddtrace/utils/sizeof.py\n+++ b/ddtrace/utils/sizeof.py\n@@ -2,10 +2,16 @@\n import sys\n from itertools import chain\n \n+_UNSET = object()\n+\n \n def iter_object(o):\n if hasattr(o, '__slots__'):\n- return (getattr(o, slot) for slot in o.__slots__)\n+ return (\n+ s\n+ for s in (getattr(o, slot, _UNSET) for slot in o.__slots__)\n+ if s != _UNSET\n+ )\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n", "issue": "Exception raised with sizeof of some spans\nWith `master`:\r\n\r\n```\r\nException in thread AgentWriter:\r\nTraceback (most recent call last):\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 801, in __bootstrap_inner\r\n self.run()\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 754, in run\r\n self.__target(*self.__args, **self.__kwargs)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py\", line 67, in _target\r\n self.run_periodic()\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py\", line 65, in flush_queue\r\n traces_queue_size = sum(map(sizeof.sizeof, traces))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 31, in sizeof\r\n return _sizeof(o)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 8, in <genexpr>\r\n return (getattr(o, slot) for slot in o.__slots__)\r\nAttributeError: rate_limit\r\n```\r\n\r\nMight be an issue where the attribute is declared in `__slots__` but not set.\n", "before_files": [{"content": "import collections\nimport sys\nfrom itertools import chain\n\n\ndef iter_object(o):\n if hasattr(o, '__slots__'):\n return (getattr(o, slot) for slot in o.__slots__)\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n # Make a copy to avoid corruption\n return chain.from_iterable(list(o.items()))\n elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):\n # Make a copy to avoid corruption\n return iter(list(o))\n return []\n\n\ndef sizeof(o):\n \"\"\"Returns the approximate memory footprint an object and all of its contents.\"\"\"\n seen = set()\n\n def _sizeof(o):\n # do not double count the same object\n if id(o) in seen:\n return 0\n seen.add(id(o))\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\n\n return _sizeof(o)\n", "path": "ddtrace/utils/sizeof.py"}]} | 1,346 | 162 |
gh_patches_debug_5790 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-175 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GCE metadata ping taking ~75 seconds, not timing out after 3 seconds
google-api-python-client: 1.6.2
python version: 3.5.2
```
from google.auth.compute_engine import _metadata
import google.auth.transport._http_client
request = google.auth.transport._http_client.Request()
_metadata.ping(request=request)
```
When running the script above, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently. If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds.
I'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73).
Is this an issue with the google-auth library?
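A sketch of the behaviour I'd expect if the timeout were handed to the connection constructor (169.254.169.254 is the GCE metadata address, unreachable off-GCE, so this should fail fast instead of hanging):

```python
import http.client
import socket

# Sketch: give the timeout to the constructor, which is the only place
# http.client honours it.
conn = http.client.HTTPConnection("169.254.169.254", timeout=3)
try:
    conn.request("GET", "/computeMetadata/v1/", headers={"metadata-flavor": "Google"})
    print(conn.getresponse().status)
except (socket.timeout, OSError) as exc:
    print("gave up after ~3s:", exc)
finally:
    conn.close()
```

Looking at `google/auth/transport/_http_client.py` below, `HTTPConnection(parts.netloc)` is created without the `timeout`, which would explain why neither the default nor `GCE_METADATA_TIMEOUT` has any effect.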
</issue>
<code>
[start of google/auth/transport/_http_client.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transport adapter for http.client, for internal use only."""
16
17 import logging
18 import socket
19
20 from six.moves import http_client
21 from six.moves import urllib
22
23 from google.auth import exceptions
24 from google.auth import transport
25
26 _LOGGER = logging.getLogger(__name__)
27
28
29 class Response(transport.Response):
30 """http.client transport response adapter.
31
32 Args:
33 response (http.client.HTTPResponse): The raw http client response.
34 """
35 def __init__(self, response):
36 self._status = response.status
37 self._headers = {
38 key.lower(): value for key, value in response.getheaders()}
39 self._data = response.read()
40
41 @property
42 def status(self):
43 return self._status
44
45 @property
46 def headers(self):
47 return self._headers
48
49 @property
50 def data(self):
51 return self._data
52
53
54 class Request(transport.Request):
55 """http.client transport request adapter."""
56
57 def __call__(self, url, method='GET', body=None, headers=None,
58 timeout=None, **kwargs):
59 """Make an HTTP request using http.client.
60
61 Args:
62 url (str): The URI to be requested.
63 method (str): The HTTP method to use for the request. Defaults
64 to 'GET'.
65 body (bytes): The payload / body in HTTP request.
66 headers (Mapping): Request headers.
67 timeout (Optional(int)): The number of seconds to wait for a
68 response from the server. If not specified or if None, the
69 socket global default timeout will be used.
70 kwargs: Additional arguments passed throught to the underlying
71 :meth:`~http.client.HTTPConnection.request` method.
72
73 Returns:
74 Response: The HTTP response.
75
76 Raises:
77 google.auth.exceptions.TransportError: If any exception occurred.
78 """
79 # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
80 if timeout is None:
81 timeout = socket._GLOBAL_DEFAULT_TIMEOUT
82
83 # http.client doesn't allow None as the headers argument.
84 if headers is None:
85 headers = {}
86
87 # http.client needs the host and path parts specified separately.
88 parts = urllib.parse.urlsplit(url)
89 path = urllib.parse.urlunsplit(
90 ('', '', parts.path, parts.query, parts.fragment))
91
92 if parts.scheme != 'http':
93 raise exceptions.TransportError(
94 'http.client transport only supports the http scheme, {}'
95 'was specified'.format(parts.scheme))
96
97 connection = http_client.HTTPConnection(parts.netloc)
98
99 try:
100 _LOGGER.debug('Making request: %s %s', method, url)
101
102 connection.request(
103 method, path, body=body, headers=headers, **kwargs)
104 response = connection.getresponse()
105 return Response(response)
106
107 except (http_client.HTTPException, socket.error) as exc:
108 raise exceptions.TransportError(exc)
109
110 finally:
111 connection.close()
112
[end of google/auth/transport/_http_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py
--- a/google/auth/transport/_http_client.py
+++ b/google/auth/transport/_http_client.py
@@ -94,7 +94,7 @@
'http.client transport only supports the http scheme, {}'
'was specified'.format(parts.scheme))
- connection = http_client.HTTPConnection(parts.netloc)
+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)
try:
_LOGGER.debug('Making request: %s %s', method, url)
| {"golden_diff": "diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py\n--- a/google/auth/transport/_http_client.py\n+++ b/google/auth/transport/_http_client.py\n@@ -94,7 +94,7 @@\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n \n- connection = http_client.HTTPConnection(parts.netloc)\n+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)\n \n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n", "issue": "GCE metadata ping taking ~75 seconds, not timing out after 3 seconds\ngoogle-api-python-client: 1.6.2\r\npython version: 3.5.2\r\n\r\n```\r\nfrom google.auth.compute_engine import _metadata\r\nimport google.auth.transport._http_client\r\nrequest = google.auth.transport._http_client.Request()\r\n_metadata.ping(request=request)\r\n```\r\n\r\nWhen running the following script, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds.\r\n\r\nI'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73).\r\n\r\nIs this an issue with the google-auth library?\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for http.client, for internal use only.\"\"\"\n\nimport logging\nimport socket\n\nfrom six.moves import http_client\nfrom six.moves import urllib\n\nfrom google.auth import exceptions\nfrom google.auth import transport\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Response(transport.Response):\n \"\"\"http.client transport response adapter.\n\n Args:\n response (http.client.HTTPResponse): The raw http client response.\n \"\"\"\n def __init__(self, response):\n self._status = response.status\n self._headers = {\n key.lower(): value for key, value in response.getheaders()}\n self._data = response.read()\n\n @property\n def status(self):\n return self._status\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def data(self):\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"http.client transport request adapter.\"\"\"\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using http.client.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. 
Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping): Request headers.\n timeout (Optional(int)): The number of seconds to wait for a\n response from the server. If not specified or if None, the\n socket global default timeout will be used.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`~http.client.HTTPConnection.request` method.\n\n Returns:\n Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.\n if timeout is None:\n timeout = socket._GLOBAL_DEFAULT_TIMEOUT\n\n # http.client doesn't allow None as the headers argument.\n if headers is None:\n headers = {}\n\n # http.client needs the host and path parts specified separately.\n parts = urllib.parse.urlsplit(url)\n path = urllib.parse.urlunsplit(\n ('', '', parts.path, parts.query, parts.fragment))\n\n if parts.scheme != 'http':\n raise exceptions.TransportError(\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n\n connection = http_client.HTTPConnection(parts.netloc)\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n\n connection.request(\n method, path, body=body, headers=headers, **kwargs)\n response = connection.getresponse()\n return Response(response)\n\n except (http_client.HTTPException, socket.error) as exc:\n raise exceptions.TransportError(exc)\n\n finally:\n connection.close()\n", "path": "google/auth/transport/_http_client.py"}]} | 1,783 | 129 |
gh_patches_debug_30707 | rasdani/github-patches | git_diff | encode__starlette-1147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session cookie should use root path
The session cookie currently uses '/'.
It should really use the ASGI root path instead, in case the application is submounted.
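Roughly what I have in mind (sketch only; names follow the current middleware):

```python
def session_cookie_header(scope, session_cookie, data, max_age, security_flags):
    # Scope the cookie to the ASGI mount point instead of hard-coding "/".
    path = scope.get("root_path", "") or "/"
    return "%s=%s; path=%s; Max-Age=%d; %s" % (
        session_cookie,
        data,
        path,
        max_age,
        security_flags,
    )


print(
    session_cookie_header(
        {"root_path": "/submounted/app"},
        "session",
        "opaque-value",
        14 * 24 * 60 * 60,
        "httponly; samesite=lax",
    )
)
# session=opaque-value; path=/submounted/app; Max-Age=1209600; httponly; samesite=lax
```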
</issue>
<code>
[start of starlette/middleware/sessions.py]
1 import json
2 import typing
3 from base64 import b64decode, b64encode
4
5 import itsdangerous
6 from itsdangerous.exc import BadTimeSignature, SignatureExpired
7
8 from starlette.datastructures import MutableHeaders, Secret
9 from starlette.requests import HTTPConnection
10 from starlette.types import ASGIApp, Message, Receive, Scope, Send
11
12
13 class SessionMiddleware:
14 def __init__(
15 self,
16 app: ASGIApp,
17 secret_key: typing.Union[str, Secret],
18 session_cookie: str = "session",
19 max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds
20 same_site: str = "lax",
21 https_only: bool = False,
22 ) -> None:
23 self.app = app
24 self.signer = itsdangerous.TimestampSigner(str(secret_key))
25 self.session_cookie = session_cookie
26 self.max_age = max_age
27 self.security_flags = "httponly; samesite=" + same_site
28 if https_only: # Secure flag can be used with HTTPS only
29 self.security_flags += "; secure"
30
31 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
32 if scope["type"] not in ("http", "websocket"): # pragma: no cover
33 await self.app(scope, receive, send)
34 return
35
36 connection = HTTPConnection(scope)
37 initial_session_was_empty = True
38
39 if self.session_cookie in connection.cookies:
40 data = connection.cookies[self.session_cookie].encode("utf-8")
41 try:
42 data = self.signer.unsign(data, max_age=self.max_age)
43 scope["session"] = json.loads(b64decode(data))
44 initial_session_was_empty = False
45 except (BadTimeSignature, SignatureExpired):
46 scope["session"] = {}
47 else:
48 scope["session"] = {}
49
50 async def send_wrapper(message: Message) -> None:
51 if message["type"] == "http.response.start":
52 if scope["session"]:
53 # We have session data to persist.
54 data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
55 data = self.signer.sign(data)
56 headers = MutableHeaders(scope=message)
57 header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
58 self.session_cookie,
59 data.decode("utf-8"),
60 self.max_age,
61 self.security_flags,
62 )
63 headers.append("Set-Cookie", header_value)
64 elif not initial_session_was_empty:
65 # The session has been cleared.
66 headers = MutableHeaders(scope=message)
67 header_value = "{}={}; {}".format(
68 self.session_cookie,
69 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
70 self.security_flags,
71 )
72 headers.append("Set-Cookie", header_value)
73 await send(message)
74
75 await self.app(scope, receive, send_wrapper)
76
[end of starlette/middleware/sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -49,14 +49,16 @@
async def send_wrapper(message: Message) -> None:
if message["type"] == "http.response.start":
+ path = scope.get("root_path", "") or "/"
if scope["session"]:
# We have session data to persist.
data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
data = self.signer.sign(data)
headers = MutableHeaders(scope=message)
- header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
+ header_value = "%s=%s; path=%s; Max-Age=%d; %s" % (
self.session_cookie,
data.decode("utf-8"),
+ path,
self.max_age,
self.security_flags,
)
@@ -66,7 +68,7 @@
headers = MutableHeaders(scope=message)
header_value = "{}={}; {}".format(
self.session_cookie,
- "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
+ f"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
self.security_flags,
)
headers.append("Set-Cookie", header_value)
| {"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -49,14 +49,16 @@\n \n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n+ path = scope.get(\"root_path\", \"\") or \"/\"\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n- header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n+ header_value = \"%s=%s; path=%s; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n+ path,\n self.max_age,\n self.security_flags,\n )\n@@ -66,7 +68,7 @@\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n self.session_cookie,\n- \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n+ f\"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n", "issue": "Session cookie should use root path\nThe session cookie currently uses '/'.\r\nIt should really use the ASGI root path instead, in case the application is submounted.\n", "before_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import HTTPConnection\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n connection = HTTPConnection(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in connection.cookies:\n data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n 
self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": "starlette/middleware/sessions.py"}]} | 1,375 | 341 |
gh_patches_debug_16146 | rasdani/github-patches | git_diff | Nitrate__Nitrate-166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installing files under /etc yields SandboxViolation in virtualenv
In one of my environments installing a newer version of Nitrate yields:
```
remote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {}
```
This is due to the change made in ff4ecc75, which added the `data_files` attribute to `setup.py`. I propose installing these files under `/etc` via the RPM packages, not via pip.
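A sketch of the `setup.py` side of that (version string is illustrative; the exact packaging split is up for discussion):

```python
# setup.py (sketch): pip/virtualenv installs never write outside site-packages.
from setuptools import setup, find_packages

setup(
    name="nitrate",
    version="4.0",  # illustrative
    packages=find_packages(),
    include_package_data=True,
    # No data_files= entry here; contrib/conf/nitrate-httpd.conf and
    # contrib/script/celeryd would be installed by the RPM spec (%files) instead.
)
```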
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import os
4 from setuptools import setup, find_packages
5
6 import tcms
7
8
9 def get_install_requires():
10 requires = []
11 links = []
12 with open('requirements/base.txt', 'r') as f:
13 for line in f:
14 dep_line = line.strip()
15 parts = dep_line.split('#egg=')
16 if len(parts) == 2:
17 links.append(dep_line)
18 requires.append(parts[1])
19 else:
20 requires.append(dep_line)
21 return requires, links
22
23 install_requires, dependency_links = get_install_requires()
24
25
26 def get_long_description():
27 with open('README.rst', 'r') as f:
28 return f.read()
29
30
31 setup(
32 name='nitrate',
33 version=tcms.__version__,
34 description='Test Case Management System',
35 long_description=get_long_description(),
36 author='Nitrate Team',
37 maintainer='Chenxiong Qi',
38 maintainer_email='[email protected]',
39 url='https://github.com/Nitrate/Nitrate/',
40 license='GPLv2+',
41 keywords='test case',
42
43 install_requires=install_requires,
44 dependency_links=dependency_links,
45
46 packages=find_packages(),
47 include_package_data=True,
48 data_files=[
49 ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),
50 ('/etc/init.d', ['contrib/script/celeryd']),
51 ],
52
53 classifiers=[
54 'Intended Audience :: Developers',
55 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
56 'Programming Language :: Python :: 2',
57 'Programming Language :: Python :: 2.7',
58 'Topic :: Software Development :: Quality Assurance',
59 'Topic :: Software Development :: Testing',
60 ],
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-import os
from setuptools import setup, find_packages
import tcms
@@ -20,6 +19,7 @@
requires.append(dep_line)
return requires, links
+
install_requires, dependency_links = get_install_requires()
@@ -45,10 +45,6 @@
packages=find_packages(),
include_package_data=True,
- data_files=[
- ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),
- ('/etc/init.d', ['contrib/script/celeryd']),
- ],
classifiers=[
'Intended Audience :: Developers',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-import os\n from setuptools import setup, find_packages\n \n import tcms\n@@ -20,6 +19,7 @@\n requires.append(dep_line)\n return requires, links\n \n+\n install_requires, dependency_links = get_install_requires()\n \n \n@@ -45,10 +45,6 @@\n \n packages=find_packages(),\n include_package_data=True,\n- data_files=[\n- ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n- ('/etc/init.d', ['contrib/script/celeryd']),\n- ],\n \n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "Installing files under /etc yields SandboxViolation in virtualenv\nIn one of my environments installing a newer version of Nitrate yields:\r\n```\r\nremote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {}\r\n```\r\n\r\nthis is due to the change made in ff4ecc75 adding the `data_files` attribute to `setup.py`. I propose installing these files under /etc via the RPM packages, not via pip. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\n\nimport tcms\n\n\ndef get_install_requires():\n requires = []\n links = []\n with open('requirements/base.txt', 'r') as f:\n for line in f:\n dep_line = line.strip()\n parts = dep_line.split('#egg=')\n if len(parts) == 2:\n links.append(dep_line)\n requires.append(parts[1])\n else:\n requires.append(dep_line)\n return requires, links\n\ninstall_requires, dependency_links = get_install_requires()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\nsetup(\n name='nitrate',\n version=tcms.__version__,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n\n install_requires=install_requires,\n dependency_links=dependency_links,\n\n packages=find_packages(),\n include_package_data=True,\n data_files=[\n ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n ('/etc/init.d', ['contrib/script/celeryd']),\n ],\n\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,134 | 172 |
gh_patches_debug_34419 | rasdani/github-patches | git_diff | intel__dffml-567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: operations: model_predict example usage
We need a doctestable example for the `model_predict`/`dffml.model.predict` operation.
References: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict
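Rough shape of what the docstring example could look like (untested sketch; the model choice and feature names are placeholders):

```python
>>> slr_model = SLRModel(
...     features=Features(DefFeature("Years", int, 1)),
...     predict=DefFeature("Salary", int, 1),
... )
>>> dataflow = DataFlow(
...     operations={
...         "prediction_using_model": model_predict,
...         "get_single": GetSingle,
...     },
...     configs={"prediction_using_model": ModelPredictConfig(model=slr_model)},
... )
>>> # After training slr_model on a few records, feed {"Years": 4} in as the
>>> # record_features Input and run the dataflow with MemoryOrchestrator to get
>>> # {"Salary": ...} back from GetSingle.
```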
</issue>
<code>
[start of dffml/operation/model.py]
1 from typing import Dict, Any
2
3 from ..record import Record
4 from ..base import config
5 from ..model import Model
6 from ..df.types import Definition
7 from ..df.base import op
8
9
10 @config
11 class ModelPredictConfig:
12 model: Model
13
14 def __post_init__(self):
15 if not isinstance(self.model, Model):
16 raise TypeError(
17 "model should be an instance of `dffml.model.model.Model`"
18 )
19
20
21 @op(
22 name="dffml.model.predict",
23 inputs={
24 "features": Definition(
25 name="record_features", primitive="Dict[str, Any]"
26 )
27 },
28 outputs={
29 "prediction": Definition(
30 name="model_predictions", primitive="Dict[str, Any]"
31 )
32 },
33 config_cls=ModelPredictConfig,
34 imp_enter={"model": (lambda self: self.config.model)},
35 ctx_enter={"mctx": (lambda self: self.parent.model())},
36 )
37 async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:
38 async def records():
39 yield Record("", data={"features": features})
40
41 async for record in self.mctx.predict(records()):
42 return {"prediction": record.predictions()}
43
[end of dffml/operation/model.py]
[start of docs/doctest_header.py]
1 # This file is used as a header in every file that is created to run each
2 # example when the doctests are run.
3 import os
4 import sys
5 import shutil
6 import atexit
7 import inspect
8 import asyncio
9 import tempfile
10 import builtins
11 import functools
12 from unittest import mock
13
14 # Create a temporary directory for test to run in
15 DOCTEST_TEMPDIR = tempfile.mkdtemp()
16 # Remove it when the test exits
17 atexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))
18 # Change the current working directory to the temporary directory
19 os.chdir(DOCTEST_TEMPDIR)
20
21 from dffml import *
22 from dffml.base import *
23 from dffml.record import *
24 from dffml.df.base import *
25 from dffml.df.types import *
26 from dffml.util.net import *
27 from dffml.df.memory import *
28 from dffml_model_scikit import *
29 from dffml.operation.io import *
30 from dffml.source.memory import *
31 from dffml.operation.output import *
32 from dffml.operation.dataflow import *
33 from dffml.operation.preprocess import *
34 from dffml.operation.mapping import *
35
36 # Used for mocking input() for AcceptUserInput operation.
37 mock.patch("builtins.input", return_value="Data flow is awesome").start()
38
[end of docs/doctest_header.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dffml/operation/model.py b/dffml/operation/model.py
--- a/dffml/operation/model.py
+++ b/dffml/operation/model.py
@@ -35,6 +35,62 @@
ctx_enter={"mctx": (lambda self: self.parent.model())},
)
async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Predict using dffml models.
+
+ Parameters
+ ++++++++++
+ features : dict
+ A dictionary contaning feature name and feature value.
+
+ Returns
+ +++++++
+ dict
+ A dictionary containing prediction.
+
+ Examples
+ ++++++++
+
+ The following example shows how to use model_predict.
+
+ >>> slr_model = SLRModel(
+ ... features=Features(DefFeature("Years", int, 1)),
+ ... predict=DefFeature("Salary", int, 1),
+ ... )
+ >>> dataflow = DataFlow(
+ ... operations={
+ ... "prediction_using_model": model_predict,
+ ... "get_single": GetSingle,
+ ... },
+ ... configs={"prediction_using_model": ModelPredictConfig(model=slr_model)},
+ ... )
+ >>> dataflow.seed.append(
+ ... Input(
+ ... value=[model_predict.op.outputs["prediction"].name],
+ ... definition=GetSingle.op.inputs["spec"],
+ ... )
+ ... )
+ >>>
+ >>> async def main():
+ ... await train(
+ ... slr_model,
+ ... {"Years": 0, "Salary": 10},
+ ... {"Years": 1, "Salary": 20},
+ ... {"Years": 2, "Salary": 30},
+ ... {"Years": 3, "Salary": 40},
+ ... )
+ ... inputs = [
+ ... Input(
+ ... value={"Years": 4}, definition=model_predict.op.inputs["features"],
+ ... )
+ ... ]
+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
+ ... print(results)
+ >>>
+ >>> asyncio.run(main())
+ {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}
+ """
+
async def records():
yield Record("", data={"features": features})
diff --git a/docs/doctest_header.py b/docs/doctest_header.py
--- a/docs/doctest_header.py
+++ b/docs/doctest_header.py
@@ -25,9 +25,11 @@
from dffml.df.types import *
from dffml.util.net import *
from dffml.df.memory import *
+from dffml.model.slr import *
from dffml_model_scikit import *
from dffml.operation.io import *
from dffml.source.memory import *
+from dffml.operation.model import *
from dffml.operation.output import *
from dffml.operation.dataflow import *
from dffml.operation.preprocess import *
| {"golden_diff": "diff --git a/dffml/operation/model.py b/dffml/operation/model.py\n--- a/dffml/operation/model.py\n+++ b/dffml/operation/model.py\n@@ -35,6 +35,62 @@\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n )\n async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n+ \"\"\"\n+ Predict using dffml models.\n+\n+ Parameters\n+ ++++++++++\n+ features : dict\n+ A dictionary contaning feature name and feature value.\n+\n+ Returns\n+ +++++++\n+ dict\n+ A dictionary containing prediction.\n+\n+ Examples\n+ ++++++++\n+\n+ The following example shows how to use model_predict.\n+\n+ >>> slr_model = SLRModel(\n+ ... features=Features(DefFeature(\"Years\", int, 1)),\n+ ... predict=DefFeature(\"Salary\", int, 1),\n+ ... )\n+ >>> dataflow = DataFlow(\n+ ... operations={\n+ ... \"prediction_using_model\": model_predict,\n+ ... \"get_single\": GetSingle,\n+ ... },\n+ ... configs={\"prediction_using_model\": ModelPredictConfig(model=slr_model)},\n+ ... )\n+ >>> dataflow.seed.append(\n+ ... Input(\n+ ... value=[model_predict.op.outputs[\"prediction\"].name],\n+ ... definition=GetSingle.op.inputs[\"spec\"],\n+ ... )\n+ ... )\n+ >>>\n+ >>> async def main():\n+ ... await train(\n+ ... slr_model,\n+ ... {\"Years\": 0, \"Salary\": 10},\n+ ... {\"Years\": 1, \"Salary\": 20},\n+ ... {\"Years\": 2, \"Salary\": 30},\n+ ... {\"Years\": 3, \"Salary\": 40},\n+ ... )\n+ ... inputs = [\n+ ... Input(\n+ ... value={\"Years\": 4}, definition=model_predict.op.inputs[\"features\"],\n+ ... )\n+ ... ]\n+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n+ ... print(results)\n+ >>>\n+ >>> asyncio.run(main())\n+ {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}\n+ \"\"\"\n+\n async def records():\n yield Record(\"\", data={\"features\": features})\n \ndiff --git a/docs/doctest_header.py b/docs/doctest_header.py\n--- a/docs/doctest_header.py\n+++ b/docs/doctest_header.py\n@@ -25,9 +25,11 @@\n from dffml.df.types import *\n from dffml.util.net import *\n from dffml.df.memory import *\n+from dffml.model.slr import *\n from dffml_model_scikit import *\n from dffml.operation.io import *\n from dffml.source.memory import *\n+from dffml.operation.model import *\n from dffml.operation.output import *\n from dffml.operation.dataflow import *\n from dffml.operation.preprocess import *\n", "issue": "docs: operations: model_predict example usage\nWe need a doctestable example for the `model_predict`/`dffml.model.predict` operation.\r\n\r\nReferences: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict\n", "before_files": [{"content": "from typing import Dict, Any\n\nfrom ..record import Record\nfrom ..base import config\nfrom ..model import Model\nfrom ..df.types import Definition\nfrom ..df.base import op\n\n\n@config\nclass ModelPredictConfig:\n model: Model\n\n def __post_init__(self):\n if not isinstance(self.model, Model):\n raise TypeError(\n \"model should be an instance of `dffml.model.model.Model`\"\n )\n\n\n@op(\n name=\"dffml.model.predict\",\n inputs={\n \"features\": Definition(\n name=\"record_features\", primitive=\"Dict[str, Any]\"\n )\n },\n outputs={\n \"prediction\": Definition(\n name=\"model_predictions\", primitive=\"Dict[str, Any]\"\n )\n },\n config_cls=ModelPredictConfig,\n imp_enter={\"model\": (lambda self: self.config.model)},\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n)\nasync def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n async def records():\n yield 
Record(\"\", data={\"features\": features})\n\n async for record in self.mctx.predict(records()):\n return {\"prediction\": record.predictions()}\n", "path": "dffml/operation/model.py"}, {"content": "# This file is used as a header in every file that is created to run each\n# example when the doctests are run.\nimport os\nimport sys\nimport shutil\nimport atexit\nimport inspect\nimport asyncio\nimport tempfile\nimport builtins\nimport functools\nfrom unittest import mock\n\n# Create a temporary directory for test to run in\nDOCTEST_TEMPDIR = tempfile.mkdtemp()\n# Remove it when the test exits\natexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))\n# Change the current working directory to the temporary directory\nos.chdir(DOCTEST_TEMPDIR)\n\nfrom dffml import *\nfrom dffml.base import *\nfrom dffml.record import *\nfrom dffml.df.base import *\nfrom dffml.df.types import *\nfrom dffml.util.net import *\nfrom dffml.df.memory import *\nfrom dffml_model_scikit import *\nfrom dffml.operation.io import *\nfrom dffml.source.memory import *\nfrom dffml.operation.output import *\nfrom dffml.operation.dataflow import *\nfrom dffml.operation.preprocess import *\nfrom dffml.operation.mapping import *\n\n# Used for mocking input() for AcceptUserInput operation.\nmock.patch(\"builtins.input\", return_value=\"Data flow is awesome\").start()\n", "path": "docs/doctest_header.py"}]} | 1,282 | 708 |
gh_patches_debug_4622 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate from bumpversion to bump2version
# Description
@dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (apparently as of November 2019). Given this, we should probably take the project's advice
> 🎬 If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. ➡ @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation).
given that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86).
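The code change itself should be a one-liner in `setup.py` (sketch below). As far as I know, bump2version is a drop-in fork that still installs a `bumpversion` console script, so `.bumpversion.cfg` and existing release commands would keep working.

```python
# setup.py (sketch): swap the dependency in the develop extra
extras_require["develop"] = sorted(
    set(
        extras_require["docs"]
        + extras_require["lint"]
        + extras_require["test"]
        + ["nbdime", "bump2version", "ipython", "pre-commit", "check-manifest", "twine"]
    )
)
```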
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes
12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted(set(['matplotlib']))
23 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'pytest~=6.0',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'jupyter',
43 'uproot~=3.3',
44 'graphviz',
45 'jsonpatch',
46 ]
47 )
48 )
49 extras_require['docs'] = sorted(
50 set(
51 [
52 'sphinx>=3.1.2',
53 'sphinxcontrib-bibtex',
54 'sphinx-click',
55 'sphinx_rtd_theme',
56 'nbsphinx',
57 'ipywidgets',
58 'sphinx-issues',
59 'sphinx-copybutton>0.2.9',
60 ]
61 )
62 )
63 extras_require['develop'] = sorted(
64 set(
65 extras_require['docs']
66 + extras_require['lint']
67 + extras_require['test']
68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
69 )
70 )
71 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
72
73
74 setup(
75 extras_require=extras_require,
76 use_scm_version=lambda: {'local_scheme': lambda version: ''},
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,7 +65,7 @@
extras_require['docs']
+ extras_require['lint']
+ extras_require['test']
- + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
+ + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']
)
)
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,7 @@\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n- + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n+ + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n )\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n", "issue": "Migrate from bumpversion to bump2version\n# Description\r\n\r\n@dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (as of apparently November 2019). Given this we should probably take the project's advice\r\n\r\n> \ud83c\udfac If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. \u27a1 @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation).\r\n\r\ngiven that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86).\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,432 | 133 |
gh_patches_debug_19873 | rasdani/github-patches | git_diff | CTFd__CTFd-2067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Size limits on logo, favicon, image uploads
Sometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing.
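For example, resizing could happen right after an upload is accepted. A rough sketch of the idea (assuming Pillow is available, which may or may not already be a dependency; the size cap is made up):

```python
from PIL import Image

MAX_DIMENSIONS = (1024, 1024)  # hypothetical upper bound for logo/banner uploads

def shrink_uploaded_image(path):
    # thumbnail() resizes in place and preserves the aspect ratio,
    # so anything already small enough is left untouched
    img = Image.open(path)
    img.thumbnail(MAX_DIMENSIONS)
    img.save(path)
```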
</issue>
<code>
[start of CTFd/forms/setup.py]
1 from wtforms import (
2 FileField,
3 HiddenField,
4 PasswordField,
5 RadioField,
6 SelectField,
7 StringField,
8 TextAreaField,
9 )
10 from wtforms.fields.html5 import EmailField
11 from wtforms.validators import InputRequired
12
13 from CTFd.constants.themes import DEFAULT_THEME
14 from CTFd.forms import BaseForm
15 from CTFd.forms.fields import SubmitField
16 from CTFd.utils.config import get_themes
17
18
19 class SetupForm(BaseForm):
20 ctf_name = StringField(
21 "Event Name", description="The name of your CTF event/workshop"
22 )
23 ctf_description = TextAreaField(
24 "Event Description", description="Description for the CTF"
25 )
26 user_mode = RadioField(
27 "User Mode",
28 choices=[("teams", "Team Mode"), ("users", "User Mode")],
29 default="teams",
30 description="Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)",
31 validators=[InputRequired()],
32 )
33
34 name = StringField(
35 "Admin Username",
36 description="Your username for the administration account",
37 validators=[InputRequired()],
38 )
39 email = EmailField(
40 "Admin Email",
41 description="Your email address for the administration account",
42 validators=[InputRequired()],
43 )
44 password = PasswordField(
45 "Admin Password",
46 description="Your password for the administration account",
47 validators=[InputRequired()],
48 )
49
50 ctf_logo = FileField(
51 "Logo",
52 description="Logo to use for the website instead of a CTF name. Used as the home page button.",
53 )
54 ctf_banner = FileField("Banner", description="Banner to use for the homepage.")
55 ctf_small_icon = FileField(
56 "Small Icon",
57 description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.",
58 )
59 ctf_theme = SelectField(
60 "Theme",
61 description="CTFd Theme to use",
62 choices=list(zip(get_themes(), get_themes())),
63 default=DEFAULT_THEME,
64 validators=[InputRequired()],
65 )
66 theme_color = HiddenField(
67 "Theme Color",
68 description="Color used by theme to control aesthetics. Requires theme support. Optional.",
69 )
70
71 start = StringField(
72 "Start Time", description="Time when your CTF is scheduled to start. Optional."
73 )
74 end = StringField(
75 "End Time", description="Time when your CTF is scheduled to end. Optional."
76 )
77 submit = SubmitField("Finish")
78
[end of CTFd/forms/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py
--- a/CTFd/forms/setup.py
+++ b/CTFd/forms/setup.py
@@ -49,16 +49,18 @@
ctf_logo = FileField(
"Logo",
- description="Logo to use for the website instead of a CTF name. Used as the home page button.",
+ description="Logo to use for the website instead of a CTF name. Used as the home page button. Optional.",
+ )
+ ctf_banner = FileField(
+ "Banner", description="Banner to use for the homepage. Optional."
)
- ctf_banner = FileField("Banner", description="Banner to use for the homepage.")
ctf_small_icon = FileField(
"Small Icon",
- description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.",
+ description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.",
)
ctf_theme = SelectField(
"Theme",
- description="CTFd Theme to use",
+ description="CTFd Theme to use. Can be changed later.",
choices=list(zip(get_themes(), get_themes())),
default=DEFAULT_THEME,
validators=[InputRequired()],
| {"golden_diff": "diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py\n--- a/CTFd/forms/setup.py\n+++ b/CTFd/forms/setup.py\n@@ -49,16 +49,18 @@\n \n ctf_logo = FileField(\n \"Logo\",\n- description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n+ description=\"Logo to use for the website instead of a CTF name. Used as the home page button. Optional.\",\n+ )\n+ ctf_banner = FileField(\n+ \"Banner\", description=\"Banner to use for the homepage. Optional.\"\n )\n- ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n- description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n+ description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n- description=\"CTFd Theme to use\",\n+ description=\"CTFd Theme to use. Can be changed later.\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n", "issue": "Size limits on logo, favicon, image uploads\nSometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing. \n", "before_files": [{"content": "from wtforms import (\n FileField,\n HiddenField,\n PasswordField,\n RadioField,\n SelectField,\n StringField,\n TextAreaField,\n)\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import InputRequired\n\nfrom CTFd.constants.themes import DEFAULT_THEME\nfrom CTFd.forms import BaseForm\nfrom CTFd.forms.fields import SubmitField\nfrom CTFd.utils.config import get_themes\n\n\nclass SetupForm(BaseForm):\n ctf_name = StringField(\n \"Event Name\", description=\"The name of your CTF event/workshop\"\n )\n ctf_description = TextAreaField(\n \"Event Description\", description=\"Description for the CTF\"\n )\n user_mode = RadioField(\n \"User Mode\",\n choices=[(\"teams\", \"Team Mode\"), (\"users\", \"User Mode\")],\n default=\"teams\",\n description=\"Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)\",\n validators=[InputRequired()],\n )\n\n name = StringField(\n \"Admin Username\",\n description=\"Your username for the administration account\",\n validators=[InputRequired()],\n )\n email = EmailField(\n \"Admin Email\",\n description=\"Your email address for the administration account\",\n validators=[InputRequired()],\n )\n password = PasswordField(\n \"Admin Password\",\n description=\"Your password for the administration account\",\n validators=[InputRequired()],\n )\n\n ctf_logo = FileField(\n \"Logo\",\n description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n )\n ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n description=\"CTFd Theme to use\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n )\n theme_color = HiddenField(\n \"Theme Color\",\n description=\"Color used by theme to control aesthetics. Requires theme support. Optional.\",\n )\n\n start = StringField(\n \"Start Time\", description=\"Time when your CTF is scheduled to start. 
Optional.\"\n )\n end = StringField(\n \"End Time\", description=\"Time when your CTF is scheduled to end. Optional.\"\n )\n submit = SubmitField(\"Finish\")\n", "path": "CTFd/forms/setup.py"}]} | 1,266 | 295 |
gh_patches_debug_28186 | rasdani/github-patches | git_diff | bridgecrewio__checkov-93 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dynamic blocks handling is partial
**Describe the bug**
An S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally.
**To Reproduce**
Steps to reproduce the behavior:
S3 configuration:
```
resource "aws_s3_bucket" "bridgecrew_cws_bucket" {
count = var.existing_bucket_name == null ? 1 : 0
bucket = local.bucket_name
acl = "private"
versioning {
enabled = true
}
lifecycle_rule {
id = "Delete old log files"
enabled = true
noncurrent_version_expiration {
days = var.log_file_expiration
}
expiration {
days = var.log_file_expiration
}
}
dynamic "logging" {
for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : []
content {
target_bucket = logging.value
target_prefix = "/${local.bucket_name}"
}
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = local.kms_key
sse_algorithm = "aws:kms"
}
}
}
tags = {
Name = "BridgecrewCWSBucket"
}
}
```
**Expected behavior**
The check should not fail
**Desktop (please complete the following information):**
- OS: mac OSX Catalina
- Checkov Version 1.0.167
Docker command in README.md is wrong
**Describe the bug**
The docker run command in the readme is incorrect and does not work. It should be:
docker run -v /user/tf:/tf bridgecrew/checkov -d /tf
</issue>
<code>
[start of checkov/terraform/parser.py]
1 import logging
2 import os
3 from os import path
4
5 import hcl2
6
7
8 class Parser:
9 logger = logging.getLogger(__name__)
10
11 def hcl2(self, directory, tf_definitions={}, parsing_errors={}):
12 modules_scan = []
13
14 for file in os.listdir(directory):
15 if file.endswith(".tf"):
16 tf_file = os.path.join(directory, file)
17 if tf_file not in tf_definitions.keys():
18 try:
19 with(open(tf_file, 'r')) as file:
20 file.seek(0)
21 dict = hcl2.load(file)
22 tf_defenition = dict
23 tf_definitions[tf_file] = tf_defenition
24 # TODO move from here
25 # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)
26
27 for modules in dict.get("module", []):
28 for module in modules.values():
29 relative_path = module['source'][0]
30 abs_path = os.path.join(directory, relative_path)
31 modules_scan.append(abs_path)
32 except Exception as e:
33 self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)
34 parsing_errors[tf_file] = e
35 for m in modules_scan:
36 if path.exists(m):
37 self.hcl2(directory=m, tf_definitions=tf_definitions)
38
39 def parse_file(self, file, tf_definitions={}, parsing_errors={}):
40 if file.endswith(".tf"):
41 try:
42 with(open(file, 'r')) as tf_file:
43 tf_file.seek(0)
44 dict = hcl2.load(tf_file)
45 tf_defenition = dict
46 tf_definitions[file] = tf_defenition
47 except Exception as e:
48 self.logger.debug('failed while parsing file %s' % file, exc_info=e)
49 parsing_errors[file] = e
50
[end of checkov/terraform/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py
--- a/checkov/terraform/parser.py
+++ b/checkov/terraform/parser.py
@@ -18,13 +18,18 @@
try:
with(open(tf_file, 'r')) as file:
file.seek(0)
- dict = hcl2.load(file)
- tf_defenition = dict
- tf_definitions[tf_file] = tf_defenition
+ tf_definition = hcl2.load(file)
+ for resource_type in tf_definition.get('resource', []):
+ for resource in resource_type.values():
+ for named_resource in resource.values():
+ for dynamic_block in named_resource.get('dynamic', []):
+ for dynamic_field_name, dynamic_field_value in dynamic_block.items():
+ named_resource[dynamic_field_name] = dynamic_field_value['for_each']
+ tf_definitions[tf_file] = tf_definition
# TODO move from here
# tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)
- for modules in dict.get("module", []):
+ for modules in tf_definition.get("module", []):
for module in modules.values():
relative_path = module['source'][0]
abs_path = os.path.join(directory, relative_path)
| {"golden_diff": "diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py\n--- a/checkov/terraform/parser.py\n+++ b/checkov/terraform/parser.py\n@@ -18,13 +18,18 @@\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n- dict = hcl2.load(file)\n- tf_defenition = dict\n- tf_definitions[tf_file] = tf_defenition\n+ tf_definition = hcl2.load(file)\n+ for resource_type in tf_definition.get('resource', []):\n+ for resource in resource_type.values():\n+ for named_resource in resource.values():\n+ for dynamic_block in named_resource.get('dynamic', []):\n+ for dynamic_field_name, dynamic_field_value in dynamic_block.items():\n+ named_resource[dynamic_field_name] = dynamic_field_value['for_each']\n+ tf_definitions[tf_file] = tf_definition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n \n- for modules in dict.get(\"module\", []):\n+ for modules in tf_definition.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n", "issue": "Dynamic blocks handling is partial\n**Describe the bug**\r\nAn S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nS3 configuration:\r\n```\r\nresource \"aws_s3_bucket\" \"bridgecrew_cws_bucket\" {\r\n count = var.existing_bucket_name == null ? 1 : 0\r\n\r\n bucket = local.bucket_name\r\n acl = \"private\"\r\n\r\n versioning {\r\n enabled = true\r\n }\r\n\r\n lifecycle_rule {\r\n id = \"Delete old log files\"\r\n enabled = true\r\n\r\n noncurrent_version_expiration {\r\n days = var.log_file_expiration\r\n }\r\n\r\n expiration {\r\n days = var.log_file_expiration\r\n }\r\n }\r\n\r\n dynamic \"logging\" {\r\n for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : []\r\n\r\n content {\r\n target_bucket = logging.value\r\n target_prefix = \"/${local.bucket_name}\"\r\n }\r\n }\r\n\r\n server_side_encryption_configuration {\r\n rule {\r\n apply_server_side_encryption_by_default {\r\n kms_master_key_id = local.kms_key\r\n sse_algorithm = \"aws:kms\"\r\n }\r\n }\r\n }\r\n\r\n tags = {\r\n Name = \"BridgecrewCWSBucket\"\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nThe check should not fail\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: mac OSX Catalina\r\n - Checkov Version 1.0.167\r\n\r\n\nDocker command in README.md is wrong\n**Describe the bug**\r\nThe docker run command in the readme is incorrect and does not work. 
It should be: \r\ndocker run -v /user/tf:/tf bridgecrew/checkov -d /tf\r\n\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom os import path\n\nimport hcl2\n\n\nclass Parser:\n logger = logging.getLogger(__name__)\n\n def hcl2(self, directory, tf_definitions={}, parsing_errors={}):\n modules_scan = []\n\n for file in os.listdir(directory):\n if file.endswith(\".tf\"):\n tf_file = os.path.join(directory, file)\n if tf_file not in tf_definitions.keys():\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n dict = hcl2.load(file)\n tf_defenition = dict\n tf_definitions[tf_file] = tf_defenition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n\n for modules in dict.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n modules_scan.append(abs_path)\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)\n parsing_errors[tf_file] = e\n for m in modules_scan:\n if path.exists(m):\n self.hcl2(directory=m, tf_definitions=tf_definitions)\n\n def parse_file(self, file, tf_definitions={}, parsing_errors={}):\n if file.endswith(\".tf\"):\n try:\n with(open(file, 'r')) as tf_file:\n tf_file.seek(0)\n dict = hcl2.load(tf_file)\n tf_defenition = dict\n tf_definitions[file] = tf_defenition\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % file, exc_info=e)\n parsing_errors[file] = e\n", "path": "checkov/terraform/parser.py"}]} | 1,392 | 288 |
gh_patches_debug_15817 | rasdani/github-patches | git_diff | OpenMined__PySyft-3588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable manual register() ids in syft.grid.register()
**Is your feature request related to a problem? Please describe.**
It is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash.
This hash should be printed with clear instructions ("Send this to whomever you'd like to connect with") when register() is called.
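Roughly something like this (sketch only, not the final API):

```python
import uuid

from .network import Network  # as in syft/grid/__init__.py

def register(**kwargs):
    peer_id = str(uuid.uuid4())
    print("Send this to whomever you'd like to connect with: {}".format(peer_id))
    peer = Network(peer_id, **kwargs)
    peer.start()
    return peer
```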
</issue>
<code>
[start of syft/grid/__init__.py]
1 from .network import Network
2
3 DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
4
5
6 def register(node_id: str, **kwargs):
7 """ Add this process as a new peer registering it in the grid network.
8
9 Args:
10 node_id: Id used to identify this node.
11 Returns:
12 peer: Peer Network instance.
13 """
14 if not kwargs:
15 args = args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL}
16 else:
17 args = kwargs
18
19 peer = Network(node_id, **args)
20 peer.start()
21 return peer
22
[end of syft/grid/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py
--- a/syft/grid/__init__.py
+++ b/syft/grid/__init__.py
@@ -1,13 +1,12 @@
from .network import Network
+import uuid
DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
-def register(node_id: str, **kwargs):
+def register(**kwargs):
""" Add this process as a new peer registering it in the grid network.
- Args:
- node_id: Id used to identify this node.
Returns:
peer: Peer Network instance.
"""
@@ -16,6 +15,8 @@
else:
args = kwargs
- peer = Network(node_id, **args)
+ peer_id = str(uuid.uuid4())
+ peer = Network(peer_id, **args)
peer.start()
+
return peer
| {"golden_diff": "diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py\n--- a/syft/grid/__init__.py\n+++ b/syft/grid/__init__.py\n@@ -1,13 +1,12 @@\n from .network import Network\n+import uuid\n \n DEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n \n \n-def register(node_id: str, **kwargs):\n+def register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n- Args:\n- node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n@@ -16,6 +15,8 @@\n else:\n args = kwargs\n \n- peer = Network(node_id, **args)\n+ peer_id = str(uuid.uuid4())\n+ peer = Network(peer_id, **args)\n peer.start()\n+\n return peer\n", "issue": "Disable manual register() ids in syft.grid.register()\n**Is your feature request related to a problem? Please describe.**\r\nIt is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash.\r\n\r\nThis hash should be printed with clear instructions (\"Send this to whomever you'd like to connect with\") when register() is called.\n", "before_files": [{"content": "from .network import Network\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(node_id: str, **kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Args:\n node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer = Network(node_id, **args)\n peer.start()\n return peer\n", "path": "syft/grid/__init__.py"}]} | 829 | 220 |
gh_patches_debug_22494 | rasdani/github-patches | git_diff | tobymao__sqlglot-3129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Athena Iceberg Tables parsing issue
Hi,
I want to parse a SQL Statement that creates an Iceberg table on Athena:
```sql
create table if not exists tmp.mytable (
name string
)
location 's3://bucket/tmp/mytable/'
tblproperties (
'table_type'='iceberg',
'format'='parquet'
);
```
running
```python
stmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA)
stmts[0].sql()
```
returns:
```sql
CREATE TABLE IF NOT EXISTS tmp.mytable
(name TEXT)
LOCATION 's3://bucket/tmp/mytable/'
WITH (
table_type='iceberg',
FORMAT='parquet'
)
```
Unfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables.
The parsed statement should look like this:
```sql
CREATE TABLE IF NOT EXISTS tmp.mytable
(name STRING)
LOCATION 's3://bucket/tmp/mytable/'
TBLPROPERTIES (
'table_type'='iceberg',
'FORMAT'='parquet'
)
```
Instead of WITH -> TBLPROPERTIES
The keys in this block are wrapped in quotes, and Iceberg has slightly different data types; in this case STRING instead of TEXT.
https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html
https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html
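Presumably the Athena dialect would need its own generator overrides for this; an untested sketch of the direction (`exp` is `sqlglot.exp`, and the method names mirror the existing Trino generator):

```python
from sqlglot import exp
from sqlglot.dialects.trino import Trino

class Athena(Trino):
    class Generator(Trino.Generator):
        TYPE_MAPPING = {
            **Trino.Generator.TYPE_MAPPING,
            exp.DataType.Type.TEXT: "STRING",
        }

        def with_properties(self, properties):
            # emit TBLPROPERTIES (...) instead of WITH (...)
            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
```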
</issue>
<code>
[start of sqlglot/dialects/athena.py]
1 from __future__ import annotations
2
3 from sqlglot.dialects.trino import Trino
4 from sqlglot.tokens import TokenType
5
6
7 class Athena(Trino):
8 class Parser(Trino.Parser):
9 STATEMENT_PARSERS = {
10 **Trino.Parser.STATEMENT_PARSERS,
11 TokenType.USING: lambda self: self._parse_as_command(self._prev),
12 }
13
[end of sqlglot/dialects/athena.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py
--- a/sqlglot/dialects/athena.py
+++ b/sqlglot/dialects/athena.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from sqlglot import exp
from sqlglot.dialects.trino import Trino
from sqlglot.tokens import TokenType
@@ -10,3 +11,27 @@
**Trino.Parser.STATEMENT_PARSERS,
TokenType.USING: lambda self: self._parse_as_command(self._prev),
}
+
+ class Generator(Trino.Generator):
+ PROPERTIES_LOCATION = {
+ **Trino.Generator.PROPERTIES_LOCATION,
+ exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
+ }
+
+ TYPE_MAPPING = {
+ **Trino.Generator.TYPE_MAPPING,
+ exp.DataType.Type.TEXT: "STRING",
+ }
+
+ TRANSFORMS = {
+ **Trino.Generator.TRANSFORMS,
+ exp.FileFormatProperty: lambda self, e: f"'FORMAT'={self.sql(e, 'this')}",
+ }
+
+ def property_sql(self, expression: exp.Property) -> str:
+ return (
+ f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"
+ )
+
+ def with_properties(self, properties: exp.Properties) -> str:
+ return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
| {"golden_diff": "diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py\n--- a/sqlglot/dialects/athena.py\n+++ b/sqlglot/dialects/athena.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+from sqlglot import exp\n from sqlglot.dialects.trino import Trino\n from sqlglot.tokens import TokenType\n \n@@ -10,3 +11,27 @@\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n+\n+ class Generator(Trino.Generator):\n+ PROPERTIES_LOCATION = {\n+ **Trino.Generator.PROPERTIES_LOCATION,\n+ exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,\n+ }\n+\n+ TYPE_MAPPING = {\n+ **Trino.Generator.TYPE_MAPPING,\n+ exp.DataType.Type.TEXT: \"STRING\",\n+ }\n+\n+ TRANSFORMS = {\n+ **Trino.Generator.TRANSFORMS,\n+ exp.FileFormatProperty: lambda self, e: f\"'FORMAT'={self.sql(e, 'this')}\",\n+ }\n+\n+ def property_sql(self, expression: exp.Property) -> str:\n+ return (\n+ f\"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}\"\n+ )\n+\n+ def with_properties(self, properties: exp.Properties) -> str:\n+ return self.properties(properties, prefix=self.seg(\"TBLPROPERTIES\"))\n", "issue": "Athena Iceberg Tables parsing issue\nHi,\r\nI want to parse a SQL Statement that creates an Iceberg table on Athena:\r\n\r\n```sql\r\ncreate table if not exists tmp.mytable (\r\n name string\r\n)\r\nlocation 's3://bucket/tmp/mytable/'\r\ntblproperties (\r\n 'table_type'='iceberg',\r\n 'format'='parquet'\r\n);\r\n```\r\nrunning \r\n```python\r\nstmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA)\r\nstmts[0].sql()\r\n```\r\nreturns:\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name TEXT) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nWITH (\r\n table_type='iceberg', \r\n FORMAT='parquet'\r\n)\r\n```\r\n\r\nUnfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables.\r\n\r\nThe parsed statement should look like this:\r\n\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name STRING) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nTBLPROPERTIES (\r\n 'table_type'='iceberg', \r\n 'FORMAT'='parquet'\r\n)\r\n```\r\n\r\nInstead of WITH -> TBLPROPERTIES\r\nThe keys in the this block are wrapped in upper quotes and iceberg has slightly different data types. In this case STRING instead of TEXT\r\n\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot.dialects.trino import Trino\nfrom sqlglot.tokens import TokenType\n\n\nclass Athena(Trino):\n class Parser(Trino.Parser):\n STATEMENT_PARSERS = {\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n", "path": "sqlglot/dialects/athena.py"}]} | 972 | 342 |
gh_patches_debug_25411 | rasdani/github-patches | git_diff | scikit-hep__pyhf-338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add README to PyPI
# Description
At the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub).
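For reference, the usual pattern is to read the Markdown README into `long_description` in `setup.py` (sketch, assuming the README sits next to `setup.py`):

```python
from os import path

this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
    long_description = readme_md.read()

# then pass long_description=long_description and
# long_description_content_type='text/markdown' to setup()
```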
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 extras_require = {
6 'tensorflow': [
7 'tensorflow>=1.10.0',
8 'tensorflow-probability==0.3.0',
9 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
10 'setuptools<=39.1.0',
11 ],
12 'torch': ['torch>=0.4.0'],
13 'mxnet': [
14 'mxnet>=1.0.0',
15 'requests<2.19.0,>=2.18.4',
16 'numpy<1.15.0,>=1.8.2',
17 'requests<2.19.0,>=2.18.4',
18 ],
19 # 'dask': [
20 # 'dask[array]'
21 # ],
22 'xmlimport': ['uproot'],
23 'minuit': ['iminuit'],
24 'develop': [
25 'pyflakes',
26 'pytest>=3.5.1',
27 'pytest-cov>=2.5.1',
28 'pytest-benchmark[histogram]',
29 'pytest-console-scripts',
30 'python-coveralls',
31 'coverage>=4.0', # coveralls
32 'matplotlib',
33 'jupyter',
34 'nbdime',
35 'uproot>=3.0.0',
36 'papermill',
37 'graphviz',
38 'bumpversion',
39 'sphinx',
40 'sphinxcontrib-bibtex',
41 'sphinxcontrib-napoleon',
42 'sphinx_rtd_theme',
43 'nbsphinx',
44 'm2r',
45 'jsonpatch',
46 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
47 'pre-commit',
48 'black;python_version>="3.6"', # Black is Python3 only
49 ],
50 }
51 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
52
53 setup(
54 name='pyhf',
55 version='0.0.15',
56 description='(partial) pure python histfactory implementation',
57 url='https://github.com/diana-hep/pyhf',
58 author='Lukas Heinrich',
59 author_email='[email protected]',
60 license='Apache',
61 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
62 classifiers=[
63 "Programming Language :: Python :: 2",
64 "Programming Language :: Python :: 2.7",
65 "Programming Language :: Python :: 3",
66 "Programming Language :: Python :: 3.6",
67 ],
68 packages=find_packages(),
69 include_package_data=True,
70 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
71 install_requires=[
72 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
73 'click>=6.0', # for console scripts,
74 'tqdm', # for readxml
75 'six', # for modifiers
76 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
77 'jsonpatch',
78 ],
79 extras_require=extras_require,
80 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
81 dependency_links=[],
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,11 @@
#!/usr/bin/env python
from setuptools import setup, find_packages
+from os import path
+
+this_directory = path.abspath(path.dirname(__file__))
+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
+ long_description = readme_md.read()
extras_require = {
'tensorflow': [
@@ -46,6 +51,7 @@
'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
'pre-commit',
'black;python_version>="3.6"', # Black is Python3 only
+ 'twine',
],
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
@@ -54,6 +60,8 @@
name='pyhf',
version='0.0.15',
description='(partial) pure python histfactory implementation',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
url='https://github.com/diana-hep/pyhf',
author='Lukas Heinrich',
author_email='[email protected]',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,11 @@\n #!/usr/bin/env python\n \n from setuptools import setup, find_packages\n+from os import path\n+\n+this_directory = path.abspath(path.dirname(__file__))\n+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n+ long_description = readme_md.read()\n \n extras_require = {\n 'tensorflow': [\n@@ -46,6 +51,7 @@\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n+ 'twine',\n ],\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n@@ -54,6 +60,8 @@\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n", "issue": "Add README to PyPI\n# Description\r\n\r\nAt the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub).\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': 
['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 1,526 | 290 |
gh_patches_debug_7584 | rasdani/github-patches | git_diff | pwndbg__pwndbg-774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
QEMU uses binfmt root instead of pwndbg.qemu.root()
This bit here should probably use pwndbg.qemu.root() instead of using the module variable directly:
https://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35
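i.e. something along these lines in `get_file` (sketch):

```python
def get_file(path):
    local_path = path
    qemu_root = pwndbg.qemu.root()
    if qemu_root:
        return os.path.join(qemu_root, path)
    # ... remote/local handling unchanged
```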
</issue>
<code>
[start of pwndbg/file.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Retrieve files from the debuggee's filesystem. Useful when
5 debugging a remote process over SSH or similar, where e.g.
6 /proc/FOO/maps is needed from the remote system.
7 """
8 from __future__ import absolute_import
9 from __future__ import division
10 from __future__ import print_function
11 from __future__ import unicode_literals
12
13 import binascii
14 import os
15 import tempfile
16
17 import gdb
18
19 import pwndbg.qemu
20 import pwndbg.remote
21 import pwndbg.symbol
22
23
24 def get_file(path):
25 """
26 Downloads the specified file from the system where the current process is
27 being debugged.
28
29 Returns:
30 The local path to the file
31 """
32 local_path = path
33
34 if pwndbg.qemu.root():
35 return os.path.join(pwndbg.qemu.binfmt_root, path)
36 elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():
37 local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)
38 error = None
39 try:
40 error = gdb.execute('remote get "%s" "%s"' % (path, local_path),
41 to_string=True)
42 except gdb.error as e:
43 error = e
44
45 if error:
46 raise OSError("Could not download remote file %r:\n" \
47 "Error: %s" % (path, error))
48
49 return local_path
50
51 def get(path):
52 """
53 Retrieves the contents of the specified file on the system
54 where the current process is being debugged.
55
56 Returns:
57 A byte array, or None.
58 """
59 local_path = get_file(path)
60
61 try:
62 with open(local_path,'rb') as f:
63 return f.read()
64 except:
65 return b''
66
67 def readlink(path):
68 """readlink(path) -> str
69
70 Read the link specified by 'path' on the system being debugged.
71
72 Handles local, qemu-usermode, and remote debugging cases.
73 """
74 is_qemu = pwndbg.qemu.is_qemu_usermode()
75
76 if is_qemu:
77 if not os.path.exists(path):
78 path = os.path.join(pwndbg.qemu.root(), path)
79
80 if is_qemu or not pwndbg.remote.is_remote():
81 try:
82 return os.readlink(path)
83 except Exception:
84 return ''
85
86 #
87 # Hurray unexposed packets!
88 #
89 # The 'vFile:readlink:' packet does exactly what it sounds like,
90 # but there is no API exposed to do this and there is also no
91 # command exposed... so we have to send the packet manually.
92 #
93 cmd = 'maintenance packet vFile:readlink:%s'
94
95 # The path must be uppercase hex-encoded and NULL-terminated.
96 path += '\x00'
97 path = binascii.hexlify(path.encode())
98 path = path.upper()
99 path = path.decode()
100
101 result = gdb.execute(cmd % path, from_tty=False, to_string=True)
102
103 """
104 sending: "vFile:readlink:2F70726F632F3130303839302F66642F3000"
105 received: "Fc;pipe:[98420]"
106
107 sending: "vFile:readlink:2F70726F632F3130303839302F66642F333300"
108 received: "F-1,2"
109 """
110
111 _, data = result.split('\n', 1)
112
113 # Sanity check
114 expected = 'received: "F'
115 if not data.startswith(expected):
116 return ''
117
118 # Negative values are errors
119 data = data[len(expected):]
120 if data[0] == '-':
121 return ''
122
123 # If non-negative, there will be a hex-encoded length followed
124 # by a semicolon.
125 n, data = data.split(';', 1)
126
127 n = int(n, 16)
128 if n < 0:
129 return ''
130
131 # The result is quoted by GDB, strip the quote and newline.
132 # I have no idea how well it handles other crazy stuff.
133 ending = '"\n'
134 data = data[:-len(ending)]
135
136 return data
137
[end of pwndbg/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/file.py b/pwndbg/file.py
--- a/pwndbg/file.py
+++ b/pwndbg/file.py
@@ -30,9 +30,9 @@
The local path to the file
"""
local_path = path
-
- if pwndbg.qemu.root():
- return os.path.join(pwndbg.qemu.binfmt_root, path)
+ qemu_root = pwndbg.qemu.root()
+ if qemu_root:
+ return os.path.join(qemu_root, path)
elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():
local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)
error = None
| {"golden_diff": "diff --git a/pwndbg/file.py b/pwndbg/file.py\n--- a/pwndbg/file.py\n+++ b/pwndbg/file.py\n@@ -30,9 +30,9 @@\n The local path to the file\n \"\"\"\n local_path = path\n-\n- if pwndbg.qemu.root():\n- return os.path.join(pwndbg.qemu.binfmt_root, path)\n+ qemu_root = pwndbg.qemu.root()\n+ if qemu_root:\n+ return os.path.join(qemu_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n", "issue": "QEMU uses binfmt root instead of pwndbg.qemu.root()\nThis bit here should probably use pwndbg.qemu.root() instead of using the module variable directly: \r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRetrieve files from the debuggee's filesystem. Useful when\ndebugging a remote process over SSH or similar, where e.g.\n/proc/FOO/maps is needed from the remote system.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\nimport os\nimport tempfile\n\nimport gdb\n\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.symbol\n\n\ndef get_file(path):\n \"\"\"\n Downloads the specified file from the system where the current process is\n being debugged.\n\n Returns:\n The local path to the file\n \"\"\"\n local_path = path\n\n if pwndbg.qemu.root():\n return os.path.join(pwndbg.qemu.binfmt_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n try:\n error = gdb.execute('remote get \"%s\" \"%s\"' % (path, local_path),\n to_string=True)\n except gdb.error as e:\n error = e\n\n if error:\n raise OSError(\"Could not download remote file %r:\\n\" \\\n \"Error: %s\" % (path, error))\n\n return local_path\n\ndef get(path):\n \"\"\"\n Retrieves the contents of the specified file on the system\n where the current process is being debugged.\n\n Returns:\n A byte array, or None.\n \"\"\"\n local_path = get_file(path)\n\n try:\n with open(local_path,'rb') as f:\n return f.read()\n except:\n return b''\n\ndef readlink(path):\n \"\"\"readlink(path) -> str\n\n Read the link specified by 'path' on the system being debugged.\n\n Handles local, qemu-usermode, and remote debugging cases.\n \"\"\"\n is_qemu = pwndbg.qemu.is_qemu_usermode()\n\n if is_qemu:\n if not os.path.exists(path):\n path = os.path.join(pwndbg.qemu.root(), path)\n\n if is_qemu or not pwndbg.remote.is_remote():\n try:\n return os.readlink(path)\n except Exception:\n return ''\n\n #\n # Hurray unexposed packets!\n #\n # The 'vFile:readlink:' packet does exactly what it sounds like,\n # but there is no API exposed to do this and there is also no\n # command exposed... 
so we have to send the packet manually.\n #\n cmd = 'maintenance packet vFile:readlink:%s'\n\n # The path must be uppercase hex-encoded and NULL-terminated.\n path += '\\x00'\n path = binascii.hexlify(path.encode())\n path = path.upper()\n path = path.decode()\n\n result = gdb.execute(cmd % path, from_tty=False, to_string=True)\n\n \"\"\"\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F3000\"\n received: \"Fc;pipe:[98420]\"\n\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F333300\"\n received: \"F-1,2\"\n \"\"\"\n\n _, data = result.split('\\n', 1)\n\n # Sanity check\n expected = 'received: \"F'\n if not data.startswith(expected):\n return ''\n\n # Negative values are errors\n data = data[len(expected):]\n if data[0] == '-':\n return ''\n\n # If non-negative, there will be a hex-encoded length followed\n # by a semicolon.\n n, data = data.split(';', 1)\n\n n = int(n, 16)\n if n < 0:\n return ''\n\n # The result is quoted by GDB, strip the quote and newline.\n # I have no idea how well it handles other crazy stuff.\n ending = '\"\\n'\n data = data[:-len(ending)]\n\n return data\n", "path": "pwndbg/file.py"}]} | 1,898 | 161 |
gh_patches_debug_32529 | rasdani/github-patches | git_diff | OpenMined__PySyft-2254 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Syft Keras bug on Windows
Relevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800
Bug:

It looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`.
The fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process.
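Roughly (sketch; `player_name` is the argument already passed to `TFEWorker.start`):

```python
import os
import tempfile

config_filename = os.path.join(tempfile.gettempdir(), "tfe.config")
launch_cmd = "python -m tf_encrypted.player --config {} {}".format(
    config_filename, player_name
)
# log launch_cmd so the user can copy-paste the right command on any OS
```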
</issue>
<code>
[start of syft/workers/tfe.py]
1 """To be extended in the near future."""
2 from collections import OrderedDict
3 import logging
4 import subprocess
5
6 import tf_encrypted as tfe
7
8
9 logger = logging.getLogger("tf_encrypted")
10
11
12 class TFEWorker:
13 # TODO(Morten) this should be turned into a proxy, with existing code
14 # extracted into a new component that's launched via a script
15
16 def __init__(self, host=None, auto_managed=True):
17 self.host = host
18 self._server_process = None
19 self._auto_managed = auto_managed
20
21 def start(self, player_name, *workers):
22 if self.host is None:
23 # we're running using a tfe.LocalConfig which doesn't require us to do anything
24 return
25
26 config_filename = "/tmp/tfe.config"
27
28 config, _ = self.config_from_workers(workers)
29 config.save(config_filename)
30
31 if self._auto_managed:
32 cmd = "python -m tf_encrypted.player --config {} {}".format(
33 config_filename, player_name
34 )
35 self._server_process = subprocess.Popen(cmd.split(" "))
36 else:
37 logger.info(
38 "If not done already, please launch the following "
39 "command in a terminal on host '%s':\n"
40 "'python -m tf_encrypted.player --config %s %s'\n"
41 "This can be done automatically in a local subprocess by "
42 "setting `auto_managed=True` when instantiating a TFEWorker.",
43 self.host,
44 config_filename,
45 player_name,
46 )
47
48 def stop(self):
49 if self.host is None:
50 # we're running using a tfe.LocalConfig which doesn't require us to do anything
51 return
52
53 if self._auto_managed:
54 if self._server_process is None:
55 return
56 self._server_process.kill()
57 self._server_process.communicate()
58 self._server_process = None
59 else:
60 logger.info("Please terminate the process on host '%s'.", self.host)
61
62 def connect_to_model(self, input_shape, output_shape, *workers):
63 config, _ = self.config_from_workers(workers)
64 tfe.set_config(config)
65
66 prot = tfe.protocol.SecureNN(
67 config.get_player("server0"), config.get_player("server1"), config.get_player("server2")
68 )
69 tfe.set_protocol(prot)
70
71 self._tf_client = tfe.serving.QueueClient(
72 input_shape=input_shape, output_shape=output_shape
73 )
74
75 sess = tfe.Session(config=config)
76 self._tf_session = sess
77
78 def query_model(self, data):
79 self.query_model_async(data)
80 return self.query_model_join()
81
82 def query_model_async(self, data):
83 self._tf_client.send_input(self._tf_session, data)
84
85 def query_model_join(self):
86 return self._tf_client.receive_output(self._tf_session)
87
88 @classmethod
89 def config_from_workers(cls, workers):
90 if len(workers) != 3:
91 raise ValueError("Expected three workers but {} were given".format(len(workers)))
92
93 player_to_worker_mapping = OrderedDict()
94 player_to_worker_mapping["server0"] = workers[0]
95 player_to_worker_mapping["server1"] = workers[1]
96 player_to_worker_mapping["server2"] = workers[2]
97
98 use_local_config = all(worker.host is None for worker in workers)
99 if use_local_config:
100 config = tfe.LocalConfig(
101 player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False
102 )
103 return config, player_to_worker_mapping
104
105 # use tfe.RemoteConfig
106 hostmap = OrderedDict(
107 [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]
108 )
109 config = tfe.RemoteConfig(hostmap)
110 return config, player_to_worker_mapping
111
[end of syft/workers/tfe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py
--- a/syft/workers/tfe.py
+++ b/syft/workers/tfe.py
@@ -1,12 +1,15 @@
"""To be extended in the near future."""
from collections import OrderedDict
import logging
+import os
import subprocess
+import tempfile
import tf_encrypted as tfe
logger = logging.getLogger("tf_encrypted")
+_TMP_DIR = tempfile.gettempdir()
class TFEWorker:
@@ -23,26 +26,24 @@
# we're running using a tfe.LocalConfig which doesn't require us to do anything
return
- config_filename = "/tmp/tfe.config"
+ config_filename = os.path.join(_TMP_DIR, "tfe.config")
config, _ = self.config_from_workers(workers)
config.save(config_filename)
+ launch_cmd = "python -m tf_encrypted.player --config {} {}".format(
+ config_filename, player_name
+ )
if self._auto_managed:
- cmd = "python -m tf_encrypted.player --config {} {}".format(
- config_filename, player_name
- )
- self._server_process = subprocess.Popen(cmd.split(" "))
+ self._server_process = subprocess.Popen(launch_cmd.split(" "))
else:
logger.info(
"If not done already, please launch the following "
- "command in a terminal on host '%s':\n"
- "'python -m tf_encrypted.player --config %s %s'\n"
+ "command in a terminal on host %s: '%s'\n"
"This can be done automatically in a local subprocess by "
- "setting `auto_managed=True` when instantiating a TFEWorker.",
+ "setting `auto_managed=True` when instantiating a TFEWorker.\n",
self.host,
- config_filename,
- player_name,
+ launch_cmd,
)
def stop(self):
| {"golden_diff": "diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py\n--- a/syft/workers/tfe.py\n+++ b/syft/workers/tfe.py\n@@ -1,12 +1,15 @@\n \"\"\"To be extended in the near future.\"\"\"\n from collections import OrderedDict\n import logging\n+import os\n import subprocess\n+import tempfile\n \n import tf_encrypted as tfe\n \n \n logger = logging.getLogger(\"tf_encrypted\")\n+_TMP_DIR = tempfile.gettempdir()\n \n \n class TFEWorker:\n@@ -23,26 +26,24 @@\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n \n- config_filename = \"/tmp/tfe.config\"\n+ config_filename = os.path.join(_TMP_DIR, \"tfe.config\")\n \n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n \n+ launch_cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n+ config_filename, player_name\n+ )\n if self._auto_managed:\n- cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n- config_filename, player_name\n- )\n- self._server_process = subprocess.Popen(cmd.split(\" \"))\n+ self._server_process = subprocess.Popen(launch_cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n- \"command in a terminal on host '%s':\\n\"\n- \"'python -m tf_encrypted.player --config %s %s'\\n\"\n+ \"command in a terminal on host %s: '%s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n- \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n+ \"setting `auto_managed=True` when instantiating a TFEWorker.\\n\",\n self.host,\n- config_filename,\n- player_name,\n+ launch_cmd,\n )\n \n def stop(self):\n", "issue": "Syft Keras bug on Windows\nRelevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800\r\n\r\nBug:\r\n\r\n\r\nIt looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. 
As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`.\r\n\r\nThe fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process.\n", "before_files": [{"content": "\"\"\"To be extended in the near future.\"\"\"\nfrom collections import OrderedDict\nimport logging\nimport subprocess\n\nimport tf_encrypted as tfe\n\n\nlogger = logging.getLogger(\"tf_encrypted\")\n\n\nclass TFEWorker:\n # TODO(Morten) this should be turned into a proxy, with existing code\n # extracted into a new component that's launched via a script\n\n def __init__(self, host=None, auto_managed=True):\n self.host = host\n self._server_process = None\n self._auto_managed = auto_managed\n\n def start(self, player_name, *workers):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n config_filename = \"/tmp/tfe.config\"\n\n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n\n if self._auto_managed:\n cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n config_filename, player_name\n )\n self._server_process = subprocess.Popen(cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n \"command in a terminal on host '%s':\\n\"\n \"'python -m tf_encrypted.player --config %s %s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n self.host,\n config_filename,\n player_name,\n )\n\n def stop(self):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n if self._auto_managed:\n if self._server_process is None:\n return\n self._server_process.kill()\n self._server_process.communicate()\n self._server_process = None\n else:\n logger.info(\"Please terminate the process on host '%s'.\", self.host)\n\n def connect_to_model(self, input_shape, output_shape, *workers):\n config, _ = self.config_from_workers(workers)\n tfe.set_config(config)\n\n prot = tfe.protocol.SecureNN(\n config.get_player(\"server0\"), config.get_player(\"server1\"), config.get_player(\"server2\")\n )\n tfe.set_protocol(prot)\n\n self._tf_client = tfe.serving.QueueClient(\n input_shape=input_shape, output_shape=output_shape\n )\n\n sess = tfe.Session(config=config)\n self._tf_session = sess\n\n def query_model(self, data):\n self.query_model_async(data)\n return self.query_model_join()\n\n def query_model_async(self, data):\n self._tf_client.send_input(self._tf_session, data)\n\n def query_model_join(self):\n return self._tf_client.receive_output(self._tf_session)\n\n @classmethod\n def config_from_workers(cls, workers):\n if len(workers) != 3:\n raise ValueError(\"Expected three workers but {} were given\".format(len(workers)))\n\n player_to_worker_mapping = OrderedDict()\n player_to_worker_mapping[\"server0\"] = workers[0]\n player_to_worker_mapping[\"server1\"] = workers[1]\n player_to_worker_mapping[\"server2\"] = workers[2]\n\n use_local_config = all(worker.host is None for worker in workers)\n if use_local_config:\n config = tfe.LocalConfig(\n player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False\n )\n return config, player_to_worker_mapping\n\n # use 
tfe.RemoteConfig\n hostmap = OrderedDict(\n [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]\n )\n config = tfe.RemoteConfig(hostmap)\n return config, player_to_worker_mapping\n", "path": "syft/workers/tfe.py"}]} | 1,829 | 443 |
gh_patches_debug_15306 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
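As a quick illustration of this preference, here is a minimal sketch; the `ratio_*` helpers are hypothetical names, not code from this repository:
```python
# Preferred: the future import makes `/` non-truncating module-wide on Python 2.
from __future__ import division


def ratio_new(x, y):
    return x / y  # true division even when both arguments are ints


# Older workaround the issue wants to avoid: coerce to float by scaling.
def ratio_old(x, y):
    return 1. * x / y
```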
</issue>
<code>
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py]
1 from typing import Optional
2
3 from great_expectations.core import ExpectationConfiguration
4 from great_expectations.execution_engine import (
5 ExecutionEngine,
6 PandasExecutionEngine,
7 SparkDFExecutionEngine,
8 )
9 from great_expectations.execution_engine.sqlalchemy_execution_engine import (
10 SqlAlchemyExecutionEngine,
11 )
12 from great_expectations.expectations.metrics.column_aggregate_metric import (
13 ColumnMetricProvider,
14 column_aggregate_partial,
15 column_aggregate_value,
16 )
17 from great_expectations.expectations.metrics.column_aggregate_metric import sa as sa
18 from great_expectations.expectations.metrics.metric_provider import metric_value
19 from great_expectations.validator.validation_graph import MetricConfiguration
20
21
22 def unique_proportion(_metrics):
23 total_values = _metrics.get("table.row_count")
24 unique_values = _metrics.get("column.distinct_values.count")
25 null_count = _metrics.get("column_values.nonnull.unexpected_count")
26
27 if total_values > 0:
28 return unique_values / (total_values - null_count)
29 else:
30 return 0
31
32
33 class ColumnUniqueProportion(ColumnMetricProvider):
34 metric_name = "column.unique_proportion"
35
36 @metric_value(engine=PandasExecutionEngine)
37 def _pandas(*args, metrics, **kwargs):
38 return unique_proportion(metrics)
39
40 @metric_value(engine=SqlAlchemyExecutionEngine)
41 def _sqlalchemy(*args, metrics, **kwargs):
42 return unique_proportion(metrics)
43
44 @metric_value(engine=SparkDFExecutionEngine)
45 def _spark(*args, metrics, **kwargs):
46 return unique_proportion(metrics)
47
48 @classmethod
49 def _get_evaluation_dependencies(
50 cls,
51 metric: MetricConfiguration,
52 configuration: Optional[ExpectationConfiguration] = None,
53 execution_engine: Optional[ExecutionEngine] = None,
54 runtime_configuration: Optional[dict] = None,
55 ):
56 table_domain_kwargs = {
57 k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
58 }
59 return {
60 "column.distinct_values.count": MetricConfiguration(
61 "column.distinct_values.count", metric.metric_domain_kwargs
62 ),
63 "table.row_count": MetricConfiguration(
64 "table.row_count", table_domain_kwargs
65 ),
66 "column_values.nonnull.unexpected_count": MetricConfiguration(
67 "column_values.nonnull.unexpected_count", metric.metric_domain_kwargs
68 ),
69 }
70
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
@@ -20,11 +20,13 @@
def unique_proportion(_metrics):
+ """Computes the proportion of unique non-null values out of all non-null values"""
total_values = _metrics.get("table.row_count")
unique_values = _metrics.get("column.distinct_values.count")
null_count = _metrics.get("column_values.nonnull.unexpected_count")
- if total_values > 0:
+ # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)
+ if total_values > 0 and total_values != null_count:
return unique_values / (total_values - null_count)
else:
return 0
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n@@ -20,11 +20,13 @@\n \n \n def unique_proportion(_metrics):\n+ \"\"\"Computes the proportion of unique non-null values out of all non-null values\"\"\"\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n \n- if total_values > 0:\n+ # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)\n+ if total_values > 0 and total_values != null_count:\n return unique_values / (total_values - null_count)\n else:\n return 0\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Optional\n\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import (\n ColumnMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import sa as sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.validator.validation_graph import MetricConfiguration\n\n\ndef unique_proportion(_metrics):\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n\n if total_values > 0:\n return unique_values / (total_values - null_count)\n else:\n return 0\n\n\nclass ColumnUniqueProportion(ColumnMetricProvider):\n metric_name = \"column.unique_proportion\"\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n table_domain_kwargs = {\n k: v for k, v in metric.metric_domain_kwargs.items() if k != \"column\"\n }\n return {\n \"column.distinct_values.count\": MetricConfiguration(\n \"column.distinct_values.count\", metric.metric_domain_kwargs\n ),\n \"table.row_count\": MetricConfiguration(\n \"table.row_count\", table_domain_kwargs\n ),\n \"column_values.nonnull.unexpected_count\": MetricConfiguration(\n \"column_values.nonnull.unexpected_count\", metric.metric_domain_kwargs\n ),\n }\n", "path": 
"great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py"}]} | 1,221 | 254 |
gh_patches_debug_6408 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py]
1 #!/usr/bin/env python
2 # -*- encoding: utf-8 -*-
3
4 """
5 Initialize new tokenizer for continual pre-training
6 """
7
8 import argparse
9 import os
10 import json
11 from typing import List, Union
12
13 from transformers.models.llama.tokenization_llama import LlamaTokenizer
14 from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
15
16 from colossalai.logging import get_dist_logger
17
18 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
19
20 logger = get_dist_logger()
21
22
23 def expand_vocab_tokenizer(
24 source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]
25 ) -> None:
26 """Expand tokenizer for continue pre-training."""
27 if os.path.exists(target_tokenizer_dir):
28 raise RuntimeError(f"Find existed directory {target_tokenizer_dir}")
29
30 source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)
31 logger.info(source_tokenizer)
32 source_sp_processor = source_tokenizer.sp_model
33 source_spm = sp_pb2_model.ModelProto()
34 source_spm.ParseFromString(source_sp_processor.serialized_model_proto())
35
36 logger.info(f"Source tokenizer size: {len(source_sp_processor)}")
37
38 # Add new tokens to source tokenizer.
39 source_spm_tokens = set([p.piece for p in source_spm.pieces])
40 for piece in new_tokens:
41 assert isinstance(piece, str), f"Invalid token({piece}) type {type(piece)}"
42 if piece in source_spm_tokens:
43 # Skip existed token.
44 continue
45 new_p = sp_pb2_model.ModelProto().SentencePiece()
46 new_p.piece = piece
47 new_p.score = 0
48 source_spm.pieces.append(new_p)
49 logger.info(f"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}")
50
51 # Save
52 os.makedirs(target_tokenizer_dir)
53 target_tokenizer_model_path = os.path.join(target_tokenizer_dir, "tokenizer.model")
54 with open(file=target_tokenizer_model_path, mode="wb") as fp:
55 fp.write(source_spm.SerializeToString())
56
57 target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)
58 target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)
59 logger.info(f"Successfully save expand tokenizer to {target_tokenizer_dir}")
60
61
62 def main():
63 parser = argparse.ArgumentParser()
64 parser.add_argument(
65 "--source_tokenizer_dir", type=str, required=True, default=None, help="Source tokenizer directory"
66 )
67 parser.add_argument(
68 "--target_tokenizer_dir", type=str, required=True, default=None, help="Target tokenizer directory"
69 )
70 parser.add_argument(
71 "--expand_tokens_file",
72 type=str,
73 required=True,
74 default=None,
75 help="Path of the file containing tokens to be extended",
76 )
77 args = parser.parse_args()
78
79 expand_tokens = []
80 with open(file=args.expand_tokens_file, mode="r", encoding="utf-8") as fp_reader:
81 for line in fp_reader:
82 item = json.loads(line)
83 # e.g., {"piece": "你好"}
84 token = item["piece"]
85 if token in expand_tokens:
86 continue
87 expand_tokens.append(token)
88 expand_tokens.sort(key=lambda t: len(t), reverse=False)
89
90 expand_vocab_tokenizer(
91 source_tokenizer_dir=args.source_tokenizer_dir,
92 target_tokenizer_dir=args.target_tokenizer_dir,
93 new_tokens=expand_tokens,
94 )
95
96
97 if __name__ == "__main__":
98 main()
99
[end of applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
--- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
+++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
@@ -6,12 +6,12 @@
"""
import argparse
-import os
import json
+import os
from typing import List, Union
-from transformers.models.llama.tokenization_llama import LlamaTokenizer
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
+from transformers.models.llama.tokenization_llama import LlamaTokenizer
from colossalai.logging import get_dist_logger
| {"golden_diff": "diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n--- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n+++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n@@ -6,12 +6,12 @@\n \"\"\"\n \n import argparse\n-import os\n import json\n+import os\n from typing import List, Union\n \n-from transformers.models.llama.tokenization_llama import LlamaTokenizer\n from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n+from transformers.models.llama.tokenization_llama import LlamaTokenizer\n \n from colossalai.logging import get_dist_logger\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\nInitialize new tokenizer for continual pre-training\n\"\"\"\n\nimport argparse\nimport os\nimport json\nfrom typing import List, Union\n\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\nfrom sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n\nfrom colossalai.logging import get_dist_logger\n\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nlogger = get_dist_logger()\n\n\ndef expand_vocab_tokenizer(\n source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]\n) -> None:\n \"\"\"Expand tokenizer for continue pre-training.\"\"\"\n if os.path.exists(target_tokenizer_dir):\n raise RuntimeError(f\"Find existed directory {target_tokenizer_dir}\")\n\n source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)\n logger.info(source_tokenizer)\n source_sp_processor = source_tokenizer.sp_model\n source_spm = sp_pb2_model.ModelProto()\n source_spm.ParseFromString(source_sp_processor.serialized_model_proto())\n\n logger.info(f\"Source tokenizer size: {len(source_sp_processor)}\")\n\n # Add new tokens to source tokenizer.\n source_spm_tokens = set([p.piece for p in source_spm.pieces])\n for piece in new_tokens:\n assert isinstance(piece, str), f\"Invalid token({piece}) type {type(piece)}\"\n if piece in source_spm_tokens:\n # Skip existed token.\n continue\n new_p = sp_pb2_model.ModelProto().SentencePiece()\n new_p.piece = piece\n new_p.score = 0\n source_spm.pieces.append(new_p)\n logger.info(f\"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}\")\n\n # Save\n os.makedirs(target_tokenizer_dir)\n target_tokenizer_model_path = os.path.join(target_tokenizer_dir, \"tokenizer.model\")\n with open(file=target_tokenizer_model_path, mode=\"wb\") as fp:\n fp.write(source_spm.SerializeToString())\n\n target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)\n target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)\n logger.info(f\"Successfully save expand tokenizer to {target_tokenizer_dir}\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--source_tokenizer_dir\", type=str, required=True, default=None, help=\"Source tokenizer directory\"\n )\n parser.add_argument(\n \"--target_tokenizer_dir\", type=str, required=True, default=None, help=\"Target tokenizer directory\"\n )\n parser.add_argument(\n \"--expand_tokens_file\",\n type=str,\n required=True,\n default=None,\n help=\"Path of the file containing tokens to be extended\",\n )\n args = parser.parse_args()\n\n expand_tokens = []\n 
with open(file=args.expand_tokens_file, mode=\"r\", encoding=\"utf-8\") as fp_reader:\n for line in fp_reader:\n item = json.loads(line)\n # e.g., {\"piece\": \"\u4f60\u597d\"}\n token = item[\"piece\"]\n if token in expand_tokens:\n continue\n expand_tokens.append(token)\n expand_tokens.sort(key=lambda t: len(t), reverse=False)\n\n expand_vocab_tokenizer(\n source_tokenizer_dir=args.source_tokenizer_dir,\n target_tokenizer_dir=args.target_tokenizer_dir,\n new_tokens=expand_tokens,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py"}]} | 1,543 | 192 |
gh_patches_debug_7274 | rasdani/github-patches | git_diff | cupy__cupy-186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make cupy.sort support arrays with rank two or more.
# Background
Arrays sorted with the `cupy.sort` operation have several properties such as dtype, rank, sorting axis, and C/F-contiguousness. Currently, `cupy.sort` supports sorting only arrays of rank one, for implementation reasons (see #55).
# Problem
This issue proposes making `cupy.sort` support sorting C-contiguous arrays of rank two or more along the last axis.
# Approach
**Rank two**
For an array with the rank of two,
```
[[4, 3]
[2, 1]]
```
treating the array as a flattened one, `[4, 3, 2, 1]`, and providing the following comparator (in pseudocode) to the underlying Thrust library:
```
if floor(i / 2) < floor(j / 2) then return true;
else if floor(i / 2) > floor(j / 2) then return false;
else return data[i] < data[j];
```
where `i` and `j` are array indices, and `data[i]` represents the `i`-th element of the array `data`,
we get the C-contiguous array sorted along the last axis:
```
[[3, 4]
[1, 2]]
```
**Rank N**
Generalizing to rank N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works:
```
if floor(i / d_n-1) < floor(j / d_n-1) then return true;
else if floor(i / d_n-1) > floor(j / d_n-1) then return false;
else return data[i] < data[j];
```
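For illustration, a CPU-side NumPy sketch of the proposed comparator follows; it only emulates the idea and is not the actual Thrust implementation, and the helper name is hypothetical:
```python
import numpy as np


def sort_last_axis_via_flat_comparator(data):
    """Emulate the proposed comparator: key = (floor(i / d_last), data[i])."""
    flat = data.ravel()      # C-contiguous flatten
    d_last = data.shape[-1]  # d_n-1
    order = sorted(range(flat.size), key=lambda i: (i // d_last, flat[i]))
    return flat[order].reshape(data.shape)


a = np.array([[4, 3], [2, 1]])
print(sort_last_axis_via_flat_comparator(a))  # [[3 4]
                                              #  [1 2]]
```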
</issue>
<code>
[start of cupy/sorting/sort.py]
1 import cupy
2 import numpy
3
4 if cupy.cuda.thrust_enabled:
5 from cupy.cuda import thrust
6
7
8 def sort(a):
9 """Returns a sorted copy of an array with a stable sorting algorithm.
10
11 Args:
12 a (cupy.ndarray): Array to be sorted.
13
14 Returns:
15 cupy.ndarray: Array of the same type and shape as ``a``.
16
17 .. note::
18 For its implementation reason, ``cupy.sort`` currently supports only
19 arrays with their rank of one and does not support ``axis``, ``kind``
20 and ``order`` parameters that ``numpy.sort`` does support.
21
22 .. seealso:: :func:`numpy.sort`
23
24 """
25 ret = a.copy()
26 ret.sort()
27 return ret
28
29
30 def lexsort(keys):
31 """Perform an indirect sort using an array of keys.
32
33 Args:
34 keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped
35 arrays. The ``k`` different "rows" to be sorted. The last row is
36 the primary sort key.
37
38 Returns:
39 cupy.ndarray: Array of indices that sort the keys.
40
41 .. note::
42 For its implementation reason, ``cupy.lexsort`` currently supports only
43 keys with their rank of one or two and does not support ``axis``
44 parameter that ``numpy.lexsort`` supports.
45
46 .. seealso:: :func:`numpy.lexsort`
47
48 """
49
50 # TODO(takagi): Support axis argument.
51
52 if not cupy.cuda.thrust_enabled:
53 raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '
54 'install CUDA Toolkit with Thrust then reinstall '
55 'CuPy after uninstalling it.')
56
57 if keys.ndim == ():
58 # as numpy.lexsort() raises
59 raise TypeError('need sequence of keys with len > 0 in lexsort')
60
61 if keys.ndim == 1:
62 return 0
63
64 # TODO(takagi): Support ranks of three or more.
65 if keys.ndim > 2:
66 raise NotImplementedError('Keys with the rank of three or more is not '
67 'supported in lexsort')
68
69 idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)
70 k = keys._shape[0]
71 n = keys._shape[1]
72 thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)
73
74 return idx_array
75
76
77 def argsort(a):
78 """Return the indices that would sort an array with a stable sorting.
79
80 Args:
81 a (cupy.ndarray): Array to sort.
82
83 Returns:
84 cupy.ndarray: Array of indices that sort ``a``.
85
86 .. note::
87 For its implementation reason, ``cupy.argsort`` currently supports only
88 arrays with their rank of one and does not support ``axis``, ``kind``
89 and ``order`` parameters that ``numpy.argsort`` supports.
90
91 .. seealso:: :func:`numpy.argsort`
92
93 """
94 return a.argsort()
95
96
97 # TODO(okuta): Implement msort
98
99
100 # TODO(okuta): Implement sort_complex
101
102
103 # TODO(okuta): Implement partition
104
105
106 # TODO(okuta): Implement argpartition
107
[end of cupy/sorting/sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py
--- a/cupy/sorting/sort.py
+++ b/cupy/sorting/sort.py
@@ -15,9 +15,9 @@
cupy.ndarray: Array of the same type and shape as ``a``.
.. note::
- For its implementation reason, ``cupy.sort`` currently supports only
- arrays with their rank of one and does not support ``axis``, ``kind``
- and ``order`` parameters that ``numpy.sort`` does support.
+ For its implementation reason, ``cupy.sort`` currently does not support
+ ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does
+ support.
.. seealso:: :func:`numpy.sort`
| {"golden_diff": "diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py\n--- a/cupy/sorting/sort.py\n+++ b/cupy/sorting/sort.py\n@@ -15,9 +15,9 @@\n cupy.ndarray: Array of the same type and shape as ``a``.\n \n .. note::\n- For its implementation reason, ``cupy.sort`` currently supports only\n- arrays with their rank of one and does not support ``axis``, ``kind``\n- and ``order`` parameters that ``numpy.sort`` does support.\n+ For its implementation reason, ``cupy.sort`` currently does not support\n+ ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does\n+ support.\n \n .. seealso:: :func:`numpy.sort`\n", "issue": "Make cupy.sort support arrays with rank two or more.\n# Background\r\nArrays sorted with `cupy.sort` operation have some properties such as dtype, rank, sorting axis and C/F-contiguousness. Currently, `cupy.sort` supports sorting arrays only with the rank of one because of its implementation reason, see #55.\r\n\r\n# Problem\r\nThis issue addresses a problem that makes `cupy.sort` support sorting arrays with the rank of two or more, with the last axis and C-contiguousness.\r\n\r\n# Approach\r\n\r\n**Rank two**\r\n\r\nFor an array with the rank of two, \r\n\r\n```\r\n[[4, 3]\r\n [2, 1]]\r\n```\r\n\r\ntreating the array as flattened one, `[4, 3, 2 ,1]`, and providing the following comparator in pseudo code to underlying Thrust library:\r\n\r\n```\r\nif floor(i / 2) < floor(j / 2) then return true;\r\nelse if floor(i / 2) > floor(j / 2) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\r\nwhere `i` and `j` are array indices, and `data[i]` represents `i` th element of array `data`,\r\n\r\nwe get the C-contiguous array sorted with the last axis.\r\n\r\n```\r\n[[3, 4]\r\n [1, 2]]\r\n```\r\n\r\n**Rank N**\r\n\r\nGeneralized to the rank of N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works:\r\n\r\n```\r\nif floor(i / d_n-1) < floor(j / d_n-1) then return true;\r\nelse if floor(i / d_n-1) > floor(j / d_n-1) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\n", "before_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.sort`` does support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n ret = a.copy()\n ret.sort()\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. 
Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a):\n \"\"\"Return the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.argsort`` supports.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort()\n\n\n# TODO(okuta): Implement msort\n\n\n# TODO(okuta): Implement sort_complex\n\n\n# TODO(okuta): Implement partition\n\n\n# TODO(okuta): Implement argpartition\n", "path": "cupy/sorting/sort.py"}]} | 1,831 | 177 |
gh_patches_debug_14378 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1649 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pearson Correlation Coefficient raises error when 2D tensor but single task
## 🐛 Bug
I have a regression-based modelling repository where the predictions can be multi-output or single-output depending on configuration. My network outputs `[n_samples, n_tasks]`, where `n_tasks` varies according to the task. If `n_tasks` is 1, then calling `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error:
```bash
ValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1
```
Changing the output shape specifically for the single-task case just to fit the metric function does not seem like a good solution; I think a simple change should be able to fix it (a sketch of such a change appears after the examples below).
My current workaround:
```python
import torchmetrics.functional as Fm
# predictions are [n, 1] for single task/output
Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])
```
There are other metrics that handle this,
```python
metrics = {
"mse": Fm.mean_squared_error(predictions, targets, squared=True),
"rmse": Fm.mean_squared_error(predictions, targets, squared=False),
"mae": Fm.mean_absolute_error(predictions, targets),
"r2": Fm.r2_score(predictions, targets, multioutput="raw_values"),
"mape": Fm.mean_absolute_percentage_error(predictions, targets),
# TODO: Raise issue on torchmetrics
"pcc": (
Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else
Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])
),
}
```
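As referenced above, here is a sketch of the kind of relaxed shape check being requested; the helper is hypothetical and not the library's actual code. It accepts both `(n,)` and `(n, 1)` inputs when there is a single output:
```python
import torch


def _num_outputs_matches(preds: torch.Tensor, num_outputs: int) -> bool:
    """Return True when `preds` has a shape compatible with `num_outputs`."""
    if num_outputs == 1:
        # Accept both (n,) and (n, 1) shaped single-output predictions.
        return preds.ndim == 1 or (preds.ndim == 2 and preds.shape[1] == 1)
    return preds.ndim == 2 and preds.shape[1] == num_outputs


print(_num_outputs_matches(torch.zeros(8, 1), 1))  # True under the relaxed check
```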
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior...
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):
- Python & PyTorch Version (e.g., 1.0):
- Any other relevant information such as OS (e.g., Linux):
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/functional/regression/utils.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from torch import Tensor
15
16
17 def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:
18 """Check that predictions and target have the correct shape, else raise error."""
19 if preds.ndim > 2 or target.ndim > 2:
20 raise ValueError(
21 f"Expected both predictions and target to be either 1- or 2-dimensional tensors,"
22 f" but got {target.ndim} and {preds.ndim}."
23 )
24 if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):
25 raise ValueError(
26 f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
27 f" and {preds.shape[1]}."
28 )
29
[end of src/torchmetrics/functional/regression/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py
--- a/src/torchmetrics/functional/regression/utils.py
+++ b/src/torchmetrics/functional/regression/utils.py
@@ -21,7 +21,9 @@
f"Expected both predictions and target to be either 1- or 2-dimensional tensors,"
f" but got {target.ndim} and {preds.ndim}."
)
- if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):
+ cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)
+ cond2 = num_outputs > 1 and num_outputs != preds.shape[1]
+ if cond1 or cond2:
raise ValueError(
f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
f" and {preds.shape[1]}."
| {"golden_diff": "diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py\n--- a/src/torchmetrics/functional/regression/utils.py\n+++ b/src/torchmetrics/functional/regression/utils.py\n@@ -21,7 +21,9 @@\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n- if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n+ cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)\n+ cond2 = num_outputs > 1 and num_outputs != preds.shape[1]\n+ if cond1 or cond2:\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n", "issue": "Pearson Correlation Coefficient raises error when 2D tensor but single task\n## \ud83d\udc1b Bug\r\n\r\nI have a regression based modelling repository where the predictions can be multi-output or single-output based on configuration. My network outputs `[n_samples, n_tasks]` where `n_task` varies according to the task. If `n_task` is 1 then trying, `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error,\r\n\r\n```bash\r\nValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1\r\n```\r\n\r\nChanging the output shape for a single task specifically just so as to fit the metric function does not seem like a good solution. I think a simple change should be able to fix it.\r\nMy current workout around,\r\n```python\r\nimport torchmetrics.functional as Fm\r\n\r\n# predictions are [n, 1] for single task/output\r\nFm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n```\r\n\r\nThere are other metrics that handle this,\r\n```python\r\n metrics = {\r\n \"mse\": Fm.mean_squared_error(predictions, targets, squared=True),\r\n \"rmse\": Fm.mean_squared_error(predictions, targets, squared=False),\r\n \"mae\": Fm.mean_absolute_error(predictions, targets),\r\n \"r2\": Fm.r2_score(predictions, targets, multioutput=\"raw_values\"),\r\n \"mape\": Fm.mean_absolute_percentage_error(predictions, targets),\r\n # TODO: Raise issue on torchmetrics\r\n \"pcc\": (\r\n Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else\r\n Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n ),\r\n }\r\n```\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):\r\n- Python & PyTorch Version (e.g., 1.0):\r\n- Any other relevant information such as OS (e.g., Linux):\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torch import Tensor\n\n\ndef _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:\n \"\"\"Check that predictions and target have the correct shape, else raise error.\"\"\"\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n )\n", "path": "src/torchmetrics/functional/regression/utils.py"}]} | 1,470 | 232 |
gh_patches_debug_27807 | rasdani/github-patches | git_diff | nilearn__nilearn-2214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.5 deprecation FutureWarning in Nilearn 0.6.0
Python 3.5 will be EOL'd in September 2020. I will add a FutureWarning before the release of Nilearn 0.6.0 stable, and we can drop support for it in Nilearn 0.8.0 stable.
@GaelVaroquaux
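A minimal sketch of the kind of warning described, with names and wording assumed to mirror the existing Python 2 / 3.4 deprecation helpers in `nilearn/__init__.py`:
```python
import sys
import warnings


def _py35_deprecation_warning():
    py35_warning = ('Python 3.5 support is deprecated and will be removed in '
                    'a future release. Consider switching to Python 3.6 or 3.7.')
    warnings.filterwarnings('once', message=py35_warning)
    warnings.warn(message=py35_warning, category=FutureWarning, stacklevel=3)


if sys.version_info[:2] == (3, 5):
    _py35_deprecation_warning()
```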
</issue>
<code>
[start of nilearn/__init__.py]
1 """
2 Machine Learning module for NeuroImaging in python
3 --------------------------------------------------
4
5 Documentation is available in the docstrings and online at
6 http://nilearn.github.io.
7
8 Contents
9 --------
10 Nilearn aims at simplifying the use of the scikit-learn package in the context of
11 neuroimaging. It provides specific input/output functions, algorithms and
12 visualization tools.
13
14 Submodules
15 ---------
16 datasets --- Utilities to download NeuroImaging datasets
17 decoding --- Decoding tools and algorithms
18 decomposition --- Includes a subject level variant of the ICA
19 algorithm called Canonical ICA
20 connectome --- Set of tools for computing functional connectivity matrices
21 and for sparse multi-subjects learning of Gaussian graphical models
22 image --- Set of functions defining mathematical operations
23 working on Niimg-like objects
24 input_data --- includes scikit-learn tranformers and tools to
25 preprocess neuro-imaging data
26 masking --- Utilities to compute and operate on brain masks
27 mass_univariate --- Defines a Massively Univariate Linear Model
28 estimated with OLS and permutation test
29 plotting --- Plotting code for nilearn
30 region --- Set of functions for extracting region-defined
31 signals, clustering methods, connected regions extraction
32 signal --- Set of preprocessing functions for time series
33 """
34
35 import gzip
36 import sys
37 import warnings
38 import os
39
40 from distutils.version import LooseVersion
41
42 from .version import _check_module_dependencies, __version__
43
44 # Workaround issue discovered in intel-openmp 2019.5:
45 # https://github.com/ContinuumIO/anaconda-issues/issues/11294
46 #
47 # see also https://github.com/scikit-learn/scikit-learn/pull/15020
48 os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
49
50 def _py2_deprecation_warning():
51 py2_warning = ('Python2 support is deprecated and will be removed in '
52 'the next release. Consider switching to Python 3.6 or 3.7.'
53 )
54 warnings.filterwarnings('once', message=py2_warning)
55 warnings.warn(message=py2_warning,
56 category=DeprecationWarning,
57 stacklevel=3,
58 )
59
60 def _py34_deprecation_warning():
61 py34_warning = ('Python 3.4 support is deprecated and will be removed in '
62 'the next release. Consider switching to Python 3.6 or 3.7.'
63 )
64 warnings.filterwarnings('once', message=py34_warning)
65 warnings.warn(message=py34_warning,
66 category=DeprecationWarning,
67 stacklevel=3,
68 )
69
70
71 def _python_deprecation_warnings():
72 if sys.version_info.major == 2:
73 _py2_deprecation_warning()
74 elif sys.version_info.major == 3 and sys.version_info.minor == 4:
75 _py34_deprecation_warning()
76
77
78 _check_module_dependencies()
79 _python_deprecation_warnings()
80
81 # Temporary work around to address formatting issues in doc tests
82 # with NumPy 1.14. NumPy had made more consistent str/repr formatting
83 # of numpy arrays. Hence we print the options to old versions.
84 import numpy as np
85 if LooseVersion(np.__version__) >= LooseVersion("1.14"):
86 # See issue #1600 in nilearn for reason to add try and except
87 try:
88 from ._utils.testing import is_nose_running
89 if is_nose_running():
90 np.set_printoptions(legacy='1.13')
91 except ImportError:
92 pass
93
94 # Monkey-patch gzip to have faster reads on large gzip files
95 if hasattr(gzip.GzipFile, 'max_read_chunk'):
96 gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb
97
98 # Boolean controlling the default globbing technique when using check_niimg
99 # and the os.path.expanduser usage in CacheMixin.
100 # Default value it True, set it to False to completely deactivate this
101 # behavior.
102 EXPAND_PATH_WILDCARDS = True
103
104 # Boolean controlling whether the joblib caches should be
105 # flushed if the version of certain modules changes (eg nibabel, as it
106 # does not respect the backward compatibility in some of its internal
107 # structures
108 # This is used in nilearn._utils.cache_mixin
109 CHECK_CACHE_VERSION = True
110
111 # list all submodules available in nilearn and version
112 __all__ = ['datasets', 'decoding', 'decomposition', 'connectome',
113 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',
114 'region', 'signal', 'surface', 'parcellations', '__version__']
115
116
[end of nilearn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nilearn/__init__.py b/nilearn/__init__.py
--- a/nilearn/__init__.py
+++ b/nilearn/__init__.py
@@ -47,32 +47,21 @@
# see also https://github.com/scikit-learn/scikit-learn/pull/15020
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
-def _py2_deprecation_warning():
- py2_warning = ('Python2 support is deprecated and will be removed in '
- 'the next release. Consider switching to Python 3.6 or 3.7.'
- )
- warnings.filterwarnings('once', message=py2_warning)
- warnings.warn(message=py2_warning,
- category=DeprecationWarning,
- stacklevel=3,
- )
-def _py34_deprecation_warning():
- py34_warning = ('Python 3.4 support is deprecated and will be removed in '
- 'the next release. Consider switching to Python 3.6 or 3.7.'
- )
- warnings.filterwarnings('once', message=py34_warning)
- warnings.warn(message=py34_warning,
- category=DeprecationWarning,
+def _py35_deprecation_warning():
+ py35_warning = ('Python 3.5 support is deprecated and will be removed in '
+ 'a future release. Consider switching to Python 3.6 or 3.7'
+ )
+ warnings.filterwarnings('once', message=py35_warning)
+ warnings.warn(message=py35_warning,
+ category=FutureWarning,
stacklevel=3,
)
def _python_deprecation_warnings():
- if sys.version_info.major == 2:
- _py2_deprecation_warning()
- elif sys.version_info.major == 3 and sys.version_info.minor == 4:
- _py34_deprecation_warning()
+ if sys.version_info.major == 3 and sys.version_info.minor == 5:
+ _py35_deprecation_warning()
_check_module_dependencies()
| {"golden_diff": "diff --git a/nilearn/__init__.py b/nilearn/__init__.py\n--- a/nilearn/__init__.py\n+++ b/nilearn/__init__.py\n@@ -47,32 +47,21 @@\n # see also https://github.com/scikit-learn/scikit-learn/pull/15020\n os.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n \n-def _py2_deprecation_warning():\n- py2_warning = ('Python2 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py2_warning)\n- warnings.warn(message=py2_warning,\n- category=DeprecationWarning,\n- stacklevel=3,\n- )\n \n-def _py34_deprecation_warning():\n- py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py34_warning)\n- warnings.warn(message=py34_warning,\n- category=DeprecationWarning,\n+def _py35_deprecation_warning():\n+ py35_warning = ('Python 3.5 support is deprecated and will be removed in '\n+ 'a future release. Consider switching to Python 3.6 or 3.7'\n+ )\n+ warnings.filterwarnings('once', message=py35_warning)\n+ warnings.warn(message=py35_warning,\n+ category=FutureWarning,\n stacklevel=3,\n )\n \n \n def _python_deprecation_warnings():\n- if sys.version_info.major == 2:\n- _py2_deprecation_warning()\n- elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n- _py34_deprecation_warning()\n+ if sys.version_info.major == 3 and sys.version_info.minor == 5:\n+ _py35_deprecation_warning()\n \n \n _check_module_dependencies()\n", "issue": "Python 3.5 deprecation FutureWarning in Nilearn 0.6.0\nPython 3.5 will be EOL'd in September 2020. I will add a FutureWarning before release of Nilearn 0.6.0 stable, and we can drop support for it for Nilearn 0.8.0 stable.\r\n@GaelVaroquaux \n", "before_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n--------------------------------------------------\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. 
It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals, clustering methods, connected regions extraction\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\nimport sys\nimport warnings\nimport os\n\nfrom distutils.version import LooseVersion\n\nfrom .version import _check_module_dependencies, __version__\n\n# Workaround issue discovered in intel-openmp 2019.5:\n# https://github.com/ContinuumIO/anaconda-issues/issues/11294\n#\n# see also https://github.com/scikit-learn/scikit-learn/pull/15020\nos.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n\ndef _py2_deprecation_warning():\n py2_warning = ('Python2 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py2_warning)\n warnings.warn(message=py2_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\ndef _py34_deprecation_warning():\n py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py34_warning)\n warnings.warn(message=py34_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef _python_deprecation_warnings():\n if sys.version_info.major == 2:\n _py2_deprecation_warning()\n elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n _py34_deprecation_warning()\n\n\n_check_module_dependencies()\n_python_deprecation_warnings()\n\n# Temporary work around to address formatting issues in doc tests\n# with NumPy 1.14. NumPy had made more consistent str/repr formatting\n# of numpy arrays. 
Hence we print the options to old versions.\nimport numpy as np\nif LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n # See issue #1600 in nilearn for reason to add try and except\n try:\n from ._utils.testing import is_nose_running\n if is_nose_running():\n np.set_printoptions(legacy='1.13')\n except ImportError:\n pass\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# and the os.path.expanduser usage in CacheMixin.\n# Default value it True, set it to False to completely deactivate this\n# behavior.\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', 'surface', 'parcellations', '__version__']\n\n", "path": "nilearn/__init__.py"}]} | 1,878 | 469 |
gh_patches_debug_21874 | rasdani/github-patches | git_diff | streamlink__streamlink-3459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No man page with pip install
### Checklist
- [ ] This is a bug report.
- [x] This is a feature request.
- [ ] This is a plugin (improvement) request.
- [x] I have read the contribution guidelines.
### Description
When installing streamlink with pip, no man page gets installed
### Expected / Actual behavior
a man page gets installed during installation of streamlink with pip
### Reproduction steps / Explicit stream URLs to test
1. ``pip install --user streamlink``
2. ``man streamlink``
3. ``No manual entry for streamlink``
4. I get the same results when using ``pip install streamlink``
### Logs
```
[cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful
[cli][debug] Python: 3.6.3
[cli][debug] Streamlink: 0.12.1
[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)
usage: streamlink [OPTIONS] <URL> [STREAM]
```
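For context, here is a minimal sketch of how setuptools can ship a man page via `data_files`, assuming the page has already been built (for example by Sphinx) at `docs/_build/man/streamlink.1`; this is an illustration, not the project's actual packaging code:
```python
from os import path

from setuptools import setup

data_files = []
man_page = "docs/_build/man/streamlink.1"
if path.exists(man_page):  # only ship the man page when it has been built
    data_files.append(("share/man/man1", [man_page]))

setup(
    name="example-package",  # placeholder name for this sketch
    version="0.0.1",
    data_files=data_files,   # installed under <prefix>/share/man/man1
)
```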
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 deps = [
12 "requests>=2.21.0,<3.0",
13 "isodate",
14 "websocket-client",
15 # Support for SOCKS proxies
16 "PySocks!=1.5.7,>=1.5.6",
17 ]
18
19 # for encrypted streams
20 if environ.get("STREAMLINK_USE_PYCRYPTO"):
21 deps.append("pycrypto")
22 else:
23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
24 deps.append("pycryptodome>=3.4.3,<4")
25
26 # for localization
27 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
28 deps.append("pycountry")
29 else:
30 deps.append("iso-639")
31 deps.append("iso3166")
32
33 # When we build an egg for the Win32 bootstrap we don"t want dependency
34 # information built into it.
35 if environ.get("NO_DEPS"):
36 deps = []
37
38 this_directory = path.abspath(path.dirname(__file__))
39 srcdir = path.join(this_directory, "src/")
40 sys_path.insert(0, srcdir)
41
42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
43 long_description = f.read()
44
45
46 def is_wheel_for_windows():
47 if "bdist_wheel" in argv:
48 names = ["win32", "win-amd64", "cygwin"]
49 length = len(argv)
50 for pos in range(argv.index("bdist_wheel") + 1, length):
51 if argv[pos] == "--plat-name" and pos + 1 < length:
52 return argv[pos + 1] in names
53 elif argv[pos][:12] == "--plat-name=":
54 return argv[pos][12:] in names
55 return False
56
57
58 entry_points = {
59 "console_scripts": ["streamlink=streamlink_cli.main:main"]
60 }
61
62 if is_wheel_for_windows():
63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
64
65
66 setup(name="streamlink",
67 version=versioneer.get_version(),
68 cmdclass=versioneer.get_cmdclass(),
69 description="Streamlink is a command-line utility that extracts streams "
70 "from various services and pipes them into a video player of "
71 "choice.",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/streamlink/streamlink",
75 project_urls={
76 "Documentation": "https://streamlink.github.io/",
77 "Tracker": "https://github.com/streamlink/streamlink/issues",
78 "Source": "https://github.com/streamlink/streamlink",
79 "Funding": "https://opencollective.com/streamlink"
80 },
81 author="Streamlink",
82 # temp until we have a mailing list / global email
83 author_email="[email protected]",
84 license="Simplified BSD",
85 packages=find_packages("src"),
86 package_dir={"": "src"},
87 entry_points=entry_points,
88 install_requires=deps,
89 test_suite="tests",
90 python_requires=">=3.6, <4",
91 classifiers=["Development Status :: 5 - Production/Stable",
92 "License :: OSI Approved :: BSD License",
93 "Environment :: Console",
94 "Intended Audience :: End Users/Desktop",
95 "Operating System :: POSIX",
96 "Operating System :: Microsoft :: Windows",
97 "Operating System :: MacOS",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3 :: Only",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Topic :: Internet :: WWW/HTTP",
105 "Topic :: Multimedia :: Sound/Audio",
106 "Topic :: Multimedia :: Video",
107 "Topic :: Utilities"])
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@
import versioneer
+data_files = []
deps = [
"requests>=2.21.0,<3.0",
"isodate",
@@ -63,6 +64,19 @@
entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
+additional_files = [
+ ("share/man/man1", ["docs/_build/man/streamlink.1"])
+]
+
+for destdir, srcfiles in additional_files:
+ files = []
+ for srcfile in srcfiles:
+ if path.exists(srcfile):
+ files.append(srcfile)
+ if files:
+ data_files.append((destdir, files))
+
+
setup(name="streamlink",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
@@ -85,6 +99,7 @@
packages=find_packages("src"),
package_dir={"": "src"},
entry_points=entry_points,
+ data_files=data_files,
install_requires=deps,
test_suite="tests",
python_requires=">=3.6, <4",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,6 +8,7 @@\n import versioneer\n \n \n+data_files = []\n deps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n@@ -63,6 +64,19 @@\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n \n \n+additional_files = [\n+ (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n+]\n+\n+for destdir, srcfiles in additional_files:\n+ files = []\n+ for srcfile in srcfiles:\n+ if path.exists(srcfile):\n+ files.append(srcfile)\n+ if files:\n+ data_files.append((destdir, files))\n+\n+\n setup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n@@ -85,6 +99,7 @@\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n+ data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n", "issue": "No man page with pip install\n### Checklist\r\n\r\n- [ ] This is a bug report.\r\n- [x] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [x] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nWhen installing streamlink with pip, no man page gets installed\r\n\r\n### Expected / Actual behavior\r\n\r\na man page gets installed during installation of streamlink with pip\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n1. ``pip install --user streamlink``\r\n2. ``man streamlink``\r\n3. ``No manual entry for streamlink``\r\n4. I get the same results when using ``pip install streamlink``\r\n\r\n### Logs\r\n\r\n```\r\n[cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful\r\n[cli][debug] Python: 3.6.3\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)\r\nusage: streamlink [OPTIONS] <URL> [STREAM]\r\n\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n 
entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 1,905 | 273 |
gh_patches_debug_38759 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update django-filter to 1.0
## Details
Sorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it.
django-filter 1.0 has changes that are backwards incompatible. The release notes are here:
http://django-filter.readthedocs.io/en/latest/migration.html
It means, amongst other things, that wherever `Filter` object instances are iterated over, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied).
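To make the `.qs` point concrete, here is an illustrative sketch (not code from this repository; `ProjectFilter` and `Project` are placeholders for whatever FilterSet and model are being filtered):

```python
from django.shortcuts import render

def project_list(request):
    # `ProjectFilter` / `Project` are placeholder names, assumed to be imported.
    filterset = ProjectFilter(request.GET, queryset=Project.objects.all())
    # django-filter < 1.0 proxied queryset methods, so `for p in filterset:`
    # used to work; from 1.0 the filtered queryset lives behind `.qs`.
    return render(request, "list.html", {"projects": filterset.qs})
```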
Pin django-filter
The new 1.0 series is incompatible, and I've opened #2498 for this purpose.
Meanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, am thinking it's possibly also an outdated decision now.
This fixes #2495 and #2490
</issue>
<code>
[start of readthedocs/builds/filters.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 import django_filters
4
5 from readthedocs.builds import constants
6 from readthedocs.builds.models import Build, Version
7
8
9 ANY_REPO = (
10 ('', _('Any')),
11 )
12
13 BUILD_TYPES = ANY_REPO + constants.BUILD_TYPES
14
15
16 class VersionSlugFilter(django_filters.FilterSet):
17
18 class Meta:
19 model = Version
20 fields = {
21 'identifier': ['icontains'],
22 'slug': ['icontains'],
23 }
24
25
26 class VersionFilter(django_filters.FilterSet):
27 project = django_filters.CharFilter(name='project__slug')
28 # Allow filtering on slug= or version=
29 slug = django_filters.CharFilter(label=_("Name"), name='slug',
30 lookup_type='exact')
31 version = django_filters.CharFilter(label=_("Version"), name='slug',
32 lookup_type='exact')
33
34 class Meta:
35 model = Version
36 fields = ['project', 'slug', 'version']
37
38
39 class BuildFilter(django_filters.FilterSet):
40 date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range')
41 type = django_filters.ChoiceFilter(label=_("Build Type"),
42 choices=BUILD_TYPES)
43
44 class Meta:
45 model = Build
46 fields = ['type', 'date', 'success']
47
[end of readthedocs/builds/filters.py]
[start of readthedocs/projects/filters.py]
1 """Project query filters"""
2
3 from django.utils.translation import ugettext_lazy as _
4
5 import django_filters
6
7 from readthedocs.projects import constants
8 from readthedocs.projects.models import Project, Domain
9
10 ANY_REPO = (
11 ('', _('Any')),
12 )
13
14 REPO_CHOICES = ANY_REPO + constants.REPO_CHOICES
15
16
17 def sort_slug(queryset, query):
18 """Fuzzy filter for slug fields
19
20 Returns sorted queryset where slug approximates ``query``
21 """
22 queryset = queryset.filter(slug__icontains=query)
23 ret = []
24 ret.extend([q.pk for q in queryset
25 if q.slug == query])
26 ret.extend([q.pk for q in queryset
27 if q.slug.startswith(query) and q.pk not in ret])
28 ret.extend([q.pk for q in queryset
29 if q.slug.endswith(query) and q.pk not in ret])
30 ret.extend([q.pk for q in queryset
31 if q.pk not in ret])
32
33 # Create a QS preserving ordering
34 clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)
35 for i, pk in enumerate(ret)])
36 ordering = 'CASE %s END' % clauses
37 ret_queryset = Project.objects.filter(pk__in=ret).extra(
38 select={'ordering': ordering}, order_by=('ordering',))
39 return ret_queryset
40
41
42 class ProjectFilter(django_filters.FilterSet):
43
44 """Project filter for filter views"""
45
46 name = django_filters.CharFilter(label=_("Name"), name='name',
47 lookup_type='icontains')
48 slug = django_filters.CharFilter(label=_("Slug"), name='slug',
49 lookup_type='icontains')
50 pub_date = django_filters.DateRangeFilter(label=_("Created Date"),
51 name="pub_date")
52 repo = django_filters.CharFilter(label=_("Repository URL"), name='repo',
53 lookup_type='icontains')
54 repo_type = django_filters.ChoiceFilter(
55 label=_("Repository Type"),
56 name='repo',
57 lookup_type='icontains',
58 choices=REPO_CHOICES,
59 )
60
61 class Meta:
62 model = Project
63 fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']
64
65
66 class DomainFilter(django_filters.FilterSet):
67 project = django_filters.CharFilter(label=_("Project"), name='project__slug',
68 lookup_type='exact')
69
70 class Meta:
71 model = Domain
72 fields = ['domain', 'project', 'canonical']
73
[end of readthedocs/projects/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py
--- a/readthedocs/builds/filters.py
+++ b/readthedocs/builds/filters.py
@@ -27,9 +27,9 @@
project = django_filters.CharFilter(name='project__slug')
# Allow filtering on slug= or version=
slug = django_filters.CharFilter(label=_("Name"), name='slug',
- lookup_type='exact')
+ lookup_expr='exact')
version = django_filters.CharFilter(label=_("Version"), name='slug',
- lookup_type='exact')
+ lookup_expr='exact')
class Meta:
model = Version
@@ -37,7 +37,7 @@
class BuildFilter(django_filters.FilterSet):
- date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range')
+ date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_expr='range')
type = django_filters.ChoiceFilter(label=_("Build Type"),
choices=BUILD_TYPES)
diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py
--- a/readthedocs/projects/filters.py
+++ b/readthedocs/projects/filters.py
@@ -44,17 +44,17 @@
"""Project filter for filter views"""
name = django_filters.CharFilter(label=_("Name"), name='name',
- lookup_type='icontains')
+ lookup_expr='icontains')
slug = django_filters.CharFilter(label=_("Slug"), name='slug',
- lookup_type='icontains')
+ lookup_expr='icontains')
pub_date = django_filters.DateRangeFilter(label=_("Created Date"),
name="pub_date")
repo = django_filters.CharFilter(label=_("Repository URL"), name='repo',
- lookup_type='icontains')
+ lookup_expr='icontains')
repo_type = django_filters.ChoiceFilter(
label=_("Repository Type"),
name='repo',
- lookup_type='icontains',
+ lookup_expr='icontains',
choices=REPO_CHOICES,
)
@@ -65,7 +65,7 @@
class DomainFilter(django_filters.FilterSet):
project = django_filters.CharFilter(label=_("Project"), name='project__slug',
- lookup_type='exact')
+ lookup_expr='exact')
class Meta:
model = Domain
| {"golden_diff": "diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py\n--- a/readthedocs/builds/filters.py\n+++ b/readthedocs/builds/filters.py\n@@ -27,9 +27,9 @@\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Version\n@@ -37,7 +37,7 @@\n \n \n class BuildFilter(django_filters.FilterSet):\n- date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n+ date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_expr='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n \ndiff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py\n--- a/readthedocs/projects/filters.py\n+++ b/readthedocs/projects/filters.py\n@@ -44,17 +44,17 @@\n \"\"\"Project filter for filter views\"\"\"\n \n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n repo_type = django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n- lookup_type='icontains',\n+ lookup_expr='icontains',\n choices=REPO_CHOICES,\n )\n \n@@ -65,7 +65,7 @@\n \n class DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Domain\n", "issue": "Update django-filter to 1.0\n## Details\r\n\r\nSorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it.\r\n\r\ndjango-filter 1.0 has changes that are backwards incompatible. 
The release notes are here:\r\n\r\nhttp://django-filter.readthedocs.io/en/latest/migration.html\r\n\r\nIt means, amongst other this, that all where `Filter` object instances are iterated on, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied).\nPin django-filter\nThe new 1.0 series is incompatible, and I've opened #2498 for this purpose.\r\n\r\nMeanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, am thinking it's possibly also an outdated decision now.\r\n\r\nThis fixes #2495 and #2490\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.builds import constants\nfrom readthedocs.builds.models import Build, Version\n\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nBUILD_TYPES = ANY_REPO + constants.BUILD_TYPES\n\n\nclass VersionSlugFilter(django_filters.FilterSet):\n\n class Meta:\n model = Version\n fields = {\n 'identifier': ['icontains'],\n 'slug': ['icontains'],\n }\n\n\nclass VersionFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n lookup_type='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n lookup_type='exact')\n\n class Meta:\n model = Version\n fields = ['project', 'slug', 'version']\n\n\nclass BuildFilter(django_filters.FilterSet):\n date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n\n class Meta:\n model = Build\n fields = ['type', 'date', 'success']\n", "path": "readthedocs/builds/filters.py"}, {"content": "\"\"\"Project query filters\"\"\"\n\nfrom django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, Domain\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nREPO_CHOICES = ANY_REPO + constants.REPO_CHOICES\n\n\ndef sort_slug(queryset, query):\n \"\"\"Fuzzy filter for slug fields\n\n Returns sorted queryset where slug approximates ``query``\n \"\"\"\n queryset = queryset.filter(slug__icontains=query)\n ret = []\n ret.extend([q.pk for q in queryset\n if q.slug == query])\n ret.extend([q.pk for q in queryset\n if q.slug.startswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.slug.endswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.pk not in ret])\n\n # Create a QS preserving ordering\n clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)\n for i, pk in enumerate(ret)])\n ordering = 'CASE %s END' % clauses\n ret_queryset = Project.objects.filter(pk__in=ret).extra(\n select={'ordering': ordering}, order_by=('ordering',))\n return ret_queryset\n\n\nclass ProjectFilter(django_filters.FilterSet):\n\n \"\"\"Project filter for filter views\"\"\"\n\n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n lookup_type='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n lookup_type='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n lookup_type='icontains')\n repo_type 
= django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n lookup_type='icontains',\n choices=REPO_CHOICES,\n )\n\n class Meta:\n model = Project\n fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']\n\n\nclass DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n lookup_type='exact')\n\n class Meta:\n model = Domain\n fields = ['domain', 'project', 'canonical']\n", "path": "readthedocs/projects/filters.py"}]} | 1,802 | 515 |
gh_patches_debug_20195 | rasdani/github-patches | git_diff | kivy__python-for-android-1723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Psycopg2 error after the apk installation.
*(screenshot of the runtime error omitted)*
I got this error while debugging the android apk. I associate this to Buildozer because I specified into the buildozer.spec requirements the psycopg2 library. It means that is not working.
How do I fix it? I know that is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py
How can I add this recipe to my project, to buildozer compile it successfully ?
</issue>
<code>
[start of pythonforandroid/recipes/psycopg2/__init__.py]
1 from pythonforandroid.recipe import PythonRecipe
2 from pythonforandroid.toolchain import current_directory, shprint
3 import sh
4
5
6 class Psycopg2Recipe(PythonRecipe):
7 """
8 Requires `libpq-dev` system dependency e.g. for `pg_config` binary.
9 """
10 version = 'latest'
11 url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'
12 depends = ['libpq']
13 site_packages_name = 'psycopg2'
14 call_hostpython_via_targetpython = False
15
16 def prebuild_arch(self, arch):
17 libdir = self.ctx.get_libs_dir(arch.arch)
18 with current_directory(self.get_build_dir(arch.arch)):
19 # pg_config_helper will return the system installed libpq, but we
20 # need the one we just cross-compiled
21 shprint(sh.sed, '-i',
22 "s|pg_config_helper.query(.libdir.)|'{}'|".format(libdir),
23 'setup.py')
24
25 def get_recipe_env(self, arch):
26 env = super(Psycopg2Recipe, self).get_recipe_env(arch)
27 env['LDFLAGS'] = "{} -L{}".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))
28 env['EXTRA_CFLAGS'] = "--host linux-armv"
29 return env
30
31 def install_python_package(self, arch, name=None, env=None, is_dir=True):
32 '''Automate the installation of a Python package (or a cython
33 package where the cython components are pre-built).'''
34 if env is None:
35 env = self.get_recipe_env(arch)
36
37 with current_directory(self.get_build_dir(arch.arch)):
38 hostpython = sh.Command(self.ctx.hostpython)
39
40 shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',
41 _env=env)
42 shprint(hostpython, 'setup.py', 'install', '-O2',
43 '--root={}'.format(self.ctx.get_python_install_dir()),
44 '--install-lib=lib/python2.7/site-packages', _env=env)
45
46
47 recipe = Psycopg2Recipe()
48
[end of pythonforandroid/recipes/psycopg2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py
--- a/pythonforandroid/recipes/psycopg2/__init__.py
+++ b/pythonforandroid/recipes/psycopg2/__init__.py
@@ -6,6 +6,9 @@
class Psycopg2Recipe(PythonRecipe):
"""
Requires `libpq-dev` system dependency e.g. for `pg_config` binary.
+ If you get `nl_langinfo` symbol runtime error, make sure you're running on
+ `ANDROID_API` (`ndk-api`) >= 26, see:
+ https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557
"""
version = 'latest'
url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'
@@ -41,7 +44,7 @@
_env=env)
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
- '--install-lib=lib/python2.7/site-packages', _env=env)
+ '--install-lib=.', _env=env)
recipe = Psycopg2Recipe()
| {"golden_diff": "diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py\n--- a/pythonforandroid/recipes/psycopg2/__init__.py\n+++ b/pythonforandroid/recipes/psycopg2/__init__.py\n@@ -6,6 +6,9 @@\n class Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n+ If you get `nl_langinfo` symbol runtime error, make sure you're running on\n+ `ANDROID_API` (`ndk-api`) >= 26, see:\n+ https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n@@ -41,7 +44,7 @@\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n- '--install-lib=lib/python2.7/site-packages', _env=env)\n+ '--install-lib=.', _env=env)\n \n \n recipe = Psycopg2Recipe()\n", "issue": "Psycopg2 error after the apk installation.\n\r\n\r\nI got this error while debugging the android apk. I associate this to Buildozer because I specified into the buildozer.spec requirements the psycopg2 library. It means that is not working.\r\n\r\nHow do I fix it? I know that is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py\r\n\r\nHow can I add this recipe to my project, to buildozer compile it successfully ?\n", "before_files": [{"content": "from pythonforandroid.recipe import PythonRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n depends = ['libpq']\n site_packages_name = 'psycopg2'\n call_hostpython_via_targetpython = False\n\n def prebuild_arch(self, arch):\n libdir = self.ctx.get_libs_dir(arch.arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # pg_config_helper will return the system installed libpq, but we\n # need the one we just cross-compiled\n shprint(sh.sed, '-i',\n \"s|pg_config_helper.query(.libdir.)|'{}'|\".format(libdir),\n 'setup.py')\n\n def get_recipe_env(self, arch):\n env = super(Psycopg2Recipe, self).get_recipe_env(arch)\n env['LDFLAGS'] = \"{} -L{}\".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))\n env['EXTRA_CFLAGS'] = \"--host linux-armv\"\n return env\n\n def install_python_package(self, arch, name=None, env=None, is_dir=True):\n '''Automate the installation of a Python package (or a cython\n package where the cython components are pre-built).'''\n if env is None:\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_build_dir(arch.arch)):\n hostpython = sh.Command(self.ctx.hostpython)\n\n shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n '--install-lib=lib/python2.7/site-packages', _env=env)\n\n\nrecipe = Psycopg2Recipe()\n", "path": "pythonforandroid/recipes/psycopg2/__init__.py"}]} | 1,277 | 295 |
gh_patches_debug_7498 | rasdani/github-patches | git_diff | beeware__toga-1751 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
iOS app not showing content with Rubicon ObjC 0.4.4
### Describe the bug
When running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log:
```
2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld
2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop
2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop()
```
This is a warning, not an error; the app will continue to run.
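As an aside from the editor (this concerns the warning only, not the cause of the bug): the deprecation message comes from calling `asyncio.get_event_loop()` when no loop is current, and the usual replacement on newer Pythons is to create the loop explicitly:

```python
import asyncio

# Creating and registering the loop explicitly avoids the
# "There is no current event loop" DeprecationWarning.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
```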
### Steps to reproduce
1. Run `examples/tutorial0` on iOS
2. See error
The app won't crash; but the app window will remain black.
### Expected behavior
The app should run and window content should be displayed.
### Screenshots
_No response_
### Environment
- Operating System: iOS
- Python version: All
- Software versions:
- Briefcase: All
- Toga: <=0.3.0.dev39
- Rubicon-objc 0.4.4
### Logs
N/A
### Additional context
The error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon.
However, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app.
The issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`.
Two workarounds exist:
1. Use the `main` branch of Toga in your app.
2. Block the use of rubicon-objc 0.4.4. If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version.
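Purely as an illustration of workaround 2 (a hypothetical requirements block, not the maintainers' final fix), the exclusion would look like this in a backend's requirement list:

```python
# Sketch only: keep rubicon-objc but refuse the 0.4.4 release, whose changed
# run_forever_cooperatively() implementation breaks the old shim.
install_requires = [
    "rubicon-objc!=0.4.4",
    "toga-core",
]
```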
</issue>
<code>
[start of iOS/setup.py]
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_iOS to compute the version;
8 # and to support versioned subpackage dependencies
9 with open("src/toga_iOS/__init__.py", encoding="utf8") as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
12 )
13 if version_match:
14 version = version_match.group(1)
15 else:
16 raise RuntimeError("Unable to find version string.")
17
18 setup(
19 version=version,
20 install_requires=[
21 "rubicon-objc>=0.4.4",
22 f"toga-core=={version}",
23 ],
24 )
25
[end of iOS/setup.py]
[start of cocoa/setup.py]
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_cocoa to compute the version;
8 # and to support versioned subpackage dependencies
9 with open("src/toga_cocoa/__init__.py", encoding="utf8") as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
12 )
13 if version_match:
14 version = version_match.group(1)
15 else:
16 raise RuntimeError("Unable to find version string.")
17
18 setup(
19 version=version,
20 install_requires=[
21 "rubicon-objc>=0.4.4",
22 f"toga-core=={version}",
23 ],
24 )
25
[end of cocoa/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cocoa/setup.py b/cocoa/setup.py
--- a/cocoa/setup.py
+++ b/cocoa/setup.py
@@ -18,7 +18,7 @@
setup(
version=version,
install_requires=[
- "rubicon-objc>=0.4.4",
- f"toga-core=={version}",
+ "rubicon-objc >= 0.4.5rc1, < 0.5.0",
+ f"toga-core == {version}",
],
)
diff --git a/iOS/setup.py b/iOS/setup.py
--- a/iOS/setup.py
+++ b/iOS/setup.py
@@ -18,7 +18,7 @@
setup(
version=version,
install_requires=[
- "rubicon-objc>=0.4.4",
- f"toga-core=={version}",
+ "rubicon-objc >= 0.4.5rc1, < 0.5.0",
+ f"toga-core == {version}",
],
)
| {"golden_diff": "diff --git a/cocoa/setup.py b/cocoa/setup.py\n--- a/cocoa/setup.py\n+++ b/cocoa/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\ndiff --git a/iOS/setup.py b/iOS/setup.py\n--- a/iOS/setup.py\n+++ b/iOS/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\n", "issue": "iOS app not showing content with Rubicon ObjC 0.4.4\n### Describe the bug\r\n\r\nWhen running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log:\r\n \r\n```\r\n2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld\r\n2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop\r\n2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop()\r\n```\r\n\r\nThis is a warning, not an error; the app will continue to run.\r\n\r\n### Steps to reproduce\r\n\r\n1. Run `examples/tutorial0` on iOS\r\n2. See error\r\n\r\nThe app won't crash; but the app window will remain black.\r\n\r\n### Expected behavior\r\n\r\nThe app should run and window content should be displayed.\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: iOS\r\n- Python version: All\r\n- Software versions:\r\n - Briefcase: All\r\n - Toga: <=0.3.0.dev39\r\n - Rubicon-objc 0.4.4\r\n\r\n\r\n### Logs\r\n\r\nN/A\r\n\r\n### Additional context\r\n\r\nThe error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon.\r\n\r\nHowever, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app. \r\n\r\nThe issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`.\r\n\r\nTwo workarounds exist:\r\n1. Use the `main` branch of Toga in your app.\r\n2. Block the use of rubicon-objc 0.4.4. 
If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_iOS to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_iOS/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "iOS/setup.py"}, {"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_cocoa to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_cocoa/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "cocoa/setup.py"}]} | 1,781 | 231 |
gh_patches_debug_5408 | rasdani/github-patches | git_diff | Mailu__Mailu-2982 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use official clamav docker image for Mailu clamav image
With https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future?
</issue>
<code>
[start of optional/clamav/start.py]
1 #!/usr/bin/env python3
2
3 import os
4 import logging as logger
5 import sys
6 from socrate import system
7
8 system.set_env(log_filters=r'SelfCheck: Database status OK\.$')
9
10 # Bootstrap the database if clamav is running for the first time
11 if not os.path.isfile("/data/main.cvd"):
12 logger.info("Starting primary virus DB download")
13 os.system("freshclam")
14
15 # Run the update daemon
16 logger.info("Starting the update daemon")
17 os.system("freshclam -d -c 6")
18
19 # Run clamav
20 logger.info("Starting clamav")
21 os.system("clamd")
22
[end of optional/clamav/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optional/clamav/start.py b/optional/clamav/start.py
deleted file mode 100755
--- a/optional/clamav/start.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import logging as logger
-import sys
-from socrate import system
-
-system.set_env(log_filters=r'SelfCheck: Database status OK\.$')
-
-# Bootstrap the database if clamav is running for the first time
-if not os.path.isfile("/data/main.cvd"):
- logger.info("Starting primary virus DB download")
- os.system("freshclam")
-
-# Run the update daemon
-logger.info("Starting the update daemon")
-os.system("freshclam -d -c 6")
-
-# Run clamav
-logger.info("Starting clamav")
-os.system("clamd")
| {"golden_diff": "diff --git a/optional/clamav/start.py b/optional/clamav/start.py\ndeleted file mode 100755\n--- a/optional/clamav/start.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import os\n-import logging as logger\n-import sys\n-from socrate import system\n-\n-system.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n-\n-# Bootstrap the database if clamav is running for the first time\n-if not os.path.isfile(\"/data/main.cvd\"):\n- logger.info(\"Starting primary virus DB download\")\n- os.system(\"freshclam\")\n-\n-# Run the update daemon\n-logger.info(\"Starting the update daemon\")\n-os.system(\"freshclam -d -c 6\")\n-\n-# Run clamav\n-logger.info(\"Starting clamav\")\n-os.system(\"clamd\")\n", "issue": "Use official clamav docker image for Mailu clamav image\nWith https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport logging as logger\nimport sys\nfrom socrate import system\n\nsystem.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n\n# Bootstrap the database if clamav is running for the first time\nif not os.path.isfile(\"/data/main.cvd\"):\n logger.info(\"Starting primary virus DB download\")\n os.system(\"freshclam\")\n\n# Run the update daemon\nlogger.info(\"Starting the update daemon\")\nos.system(\"freshclam -d -c 6\")\n\n# Run clamav\nlogger.info(\"Starting clamav\")\nos.system(\"clamd\")\n", "path": "optional/clamav/start.py"}]} | 764 | 200 |
gh_patches_debug_27959 | rasdani/github-patches | git_diff | pwndbg__pwndbg-2009 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pattern_create & run features
It is annoying to have to have multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet like gdb-peda has implemented. Reversing takes long enough, this would make our lives a little bit easier.
I would like to add the pattern_create feature into pwndbg. As well as run, so that I can quickly create our cyclic values and then run our output (run < payload-100.txt) so we can check the registers in under 10 seconds without restarting the program.
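For context (an editor's sketch, not part of the request): the workflow described above already exists as library calls in pwntools, the same helpers that pwndbg's `cyclic` command imports:

```python
# Standalone sketch of the requested workflow using pwntools' helpers.
from pwnlib.util.cyclic import cyclic, cyclic_find

with open("payload-100.txt", "wb") as f:
    f.write(cyclic(100))        # pattern_create equivalent

# After a crash, look up the offset of a value seen in a register:
print(cyclic_find(b"kaaa"))      # -> 40
```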
</issue>
<code>
[start of pwndbg/commands/cyclic.py]
1 from __future__ import annotations
2
3 import argparse
4 import string
5
6 import gdb
7 from pwnlib.util.cyclic import cyclic
8 from pwnlib.util.cyclic import cyclic_find
9
10 import pwndbg.commands
11 import pwndbg.gdblib.arch
12 from pwndbg.color import message
13
14 parser = argparse.ArgumentParser(description="Cyclic pattern creator/finder.")
15
16 parser.add_argument(
17 "-a",
18 "--alphabet",
19 metavar="charset",
20 default=string.ascii_lowercase,
21 type=str.encode,
22 help="The alphabet to use in the cyclic pattern",
23 )
24
25 parser.add_argument(
26 "-n",
27 "--length",
28 metavar="length",
29 type=int,
30 help="Size of the unique subsequences (defaults to the pointer size for the current arch)",
31 )
32
33 group = parser.add_mutually_exclusive_group(required=False)
34 group.add_argument(
35 "-l",
36 "-o",
37 "--offset",
38 "--lookup",
39 dest="lookup",
40 metavar="lookup_value",
41 type=str,
42 help="Do a lookup instead of printing the sequence (accepts constant values as well as expressions)",
43 )
44
45 group.add_argument(
46 "count",
47 type=int,
48 nargs="?",
49 default=100,
50 help="Number of characters to print from the sequence (default: print the entire sequence)",
51 )
52
53
54 @pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic")
55 def cyclic_cmd(alphabet, length, lookup, count=100) -> None:
56 if length:
57 # Convert from gdb.Value
58 length = int(length)
59 else:
60 length = pwndbg.gdblib.arch.ptrsize
61
62 if lookup:
63 lookup = pwndbg.commands.fix(lookup, sloppy=True)
64
65 if isinstance(lookup, (gdb.Value, int)):
66 lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)
67 elif isinstance(lookup, str):
68 lookup = bytes(lookup, "utf-8")
69
70 if len(lookup) != length:
71 print(
72 message.error(
73 f"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)"
74 )
75 )
76 return
77
78 hexstr = "0x" + lookup.hex()
79 print(
80 message.notice(
81 f"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})"
82 )
83 )
84
85 if any(c not in alphabet for c in lookup):
86 print(message.error("Pattern contains characters not present in the alphabet"))
87 return
88
89 offset = cyclic_find(lookup, alphabet, length)
90
91 if offset == -1:
92 print(message.error("Given lookup pattern does not exist in the sequence"))
93 else:
94 print(message.success(f"Found at offset {offset}"))
95 else:
96 sequence = cyclic(int(count), alphabet, length)
97 print(sequence.decode())
98
[end of pwndbg/commands/cyclic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py
--- a/pwndbg/commands/cyclic.py
+++ b/pwndbg/commands/cyclic.py
@@ -30,6 +30,7 @@
help="Size of the unique subsequences (defaults to the pointer size for the current arch)",
)
+
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-l",
@@ -50,9 +51,17 @@
help="Number of characters to print from the sequence (default: print the entire sequence)",
)
+parser.add_argument(
+ "filename",
+ type=str,
+ help="Name (path) of the file to save the cyclic pattern to",
+ default="",
+ nargs="?",
+)
+
@pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic")
-def cyclic_cmd(alphabet, length, lookup, count=100) -> None:
+def cyclic_cmd(alphabet, length, lookup, count=100, filename="") -> None:
if length:
# Convert from gdb.Value
length = int(length)
@@ -93,5 +102,12 @@
else:
print(message.success(f"Found at offset {offset}"))
else:
- sequence = cyclic(int(count), alphabet, length)
- print(sequence.decode())
+ count = int(count)
+ sequence = cyclic(count, alphabet, length)
+
+ if not filename:
+ print(sequence.decode())
+ else:
+ with open(filename, "wb") as f:
+ f.write(sequence)
+ print(f"Written a cyclic sequence of length {count} to file {filename}")
| {"golden_diff": "diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py\n--- a/pwndbg/commands/cyclic.py\n+++ b/pwndbg/commands/cyclic.py\n@@ -30,6 +30,7 @@\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n )\n \n+\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\n \"-l\",\n@@ -50,9 +51,17 @@\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n )\n \n+parser.add_argument(\n+ \"filename\",\n+ type=str,\n+ help=\"Name (path) of the file to save the cyclic pattern to\",\n+ default=\"\",\n+ nargs=\"?\",\n+)\n+\n \n @pwndbg.commands.ArgparsedCommand(parser, command_name=\"cyclic\")\n-def cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n+def cyclic_cmd(alphabet, length, lookup, count=100, filename=\"\") -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n@@ -93,5 +102,12 @@\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n- sequence = cyclic(int(count), alphabet, length)\n- print(sequence.decode())\n+ count = int(count)\n+ sequence = cyclic(count, alphabet, length)\n+\n+ if not filename:\n+ print(sequence.decode())\n+ else:\n+ with open(filename, \"wb\") as f:\n+ f.write(sequence)\n+ print(f\"Written a cyclic sequence of length {count} to file {filename}\")\n", "issue": "pattern_create & run features\nIt is annoying to have to have multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet like gdb-peda has implemented. Reversing takes long enough, this would make our lives a little bit easier.\r\n\r\nI would like to add the pattern_create feature into pwndbg. As well as run, so that I can quickly create our cyclic values and then run our output (run < payload-100.txt) so we can check the registers in under 10 seconds without restarting the program. 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport string\n\nimport gdb\nfrom pwnlib.util.cyclic import cyclic\nfrom pwnlib.util.cyclic import cyclic_find\n\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nfrom pwndbg.color import message\n\nparser = argparse.ArgumentParser(description=\"Cyclic pattern creator/finder.\")\n\nparser.add_argument(\n \"-a\",\n \"--alphabet\",\n metavar=\"charset\",\n default=string.ascii_lowercase,\n type=str.encode,\n help=\"The alphabet to use in the cyclic pattern\",\n)\n\nparser.add_argument(\n \"-n\",\n \"--length\",\n metavar=\"length\",\n type=int,\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n)\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n \"-l\",\n \"-o\",\n \"--offset\",\n \"--lookup\",\n dest=\"lookup\",\n metavar=\"lookup_value\",\n type=str,\n help=\"Do a lookup instead of printing the sequence (accepts constant values as well as expressions)\",\n)\n\ngroup.add_argument(\n \"count\",\n type=int,\n nargs=\"?\",\n default=100,\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n)\n\n\[email protected](parser, command_name=\"cyclic\")\ndef cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n else:\n length = pwndbg.gdblib.arch.ptrsize\n\n if lookup:\n lookup = pwndbg.commands.fix(lookup, sloppy=True)\n\n if isinstance(lookup, (gdb.Value, int)):\n lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)\n elif isinstance(lookup, str):\n lookup = bytes(lookup, \"utf-8\")\n\n if len(lookup) != length:\n print(\n message.error(\n f\"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)\"\n )\n )\n return\n\n hexstr = \"0x\" + lookup.hex()\n print(\n message.notice(\n f\"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})\"\n )\n )\n\n if any(c not in alphabet for c in lookup):\n print(message.error(\"Pattern contains characters not present in the alphabet\"))\n return\n\n offset = cyclic_find(lookup, alphabet, length)\n\n if offset == -1:\n print(message.error(\"Given lookup pattern does not exist in the sequence\"))\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n sequence = cyclic(int(count), alphabet, length)\n print(sequence.decode())\n", "path": "pwndbg/commands/cyclic.py"}]} | 1,470 | 379 |
gh_patches_debug_687 | rasdani/github-patches | git_diff | hylang__hy-2220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add header notice to "stable" line documentation to point users to the alpha cycle documentation
I was reading documentation and noticed that hy.contrib.walk is mentioned there:
https://docs.hylang.org/en/stable/contrib/walk.html
However, it appears that the hy.contrib.walk file is no longer on the master branch:
https://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy
Is it a bug in the documentation, or am I missing something?
</issue>
<code>
[start of docs/conf.py]
1 # This file is execfile()d with the current directory set to its containing dir.
2
3 import re, os, sys, time, html
4
5 sys.path.insert(0, os.path.abspath('..'))
6
7 extensions = [
8 'sphinx.ext.napoleon',
9 'sphinx.ext.intersphinx',
10 'sphinx.ext.autodoc',
11 'sphinx.ext.viewcode',
12 'sphinxcontrib.hydomain',
13 ]
14
15 from get_version import __version__ as hy_version
16
17 # Read the Docs might dirty its checkout, so strip the dirty flag.
18 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version)
19
20 templates_path = ['_templates']
21 source_suffix = '.rst'
22
23 master_doc = 'index'
24
25 # General information about the project.
26 project = 'hy'
27 copyright = '%s the authors' % time.strftime('%Y')
28
29 # The version info for the project you're documenting, acts as replacement for
30 # |version| and |release|, also used in various other places throughout the
31 # built documents.
32 #
33 # The short X.Y version.
34 version = ".".join(hy_version.split(".")[:-1])
35 # The full version, including alpha/beta/rc tags.
36 release = hy_version
37 hy_descriptive_version = html.escape(hy_version)
38 if "+" in hy_version:
39 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
40
41 exclude_patterns = ['_build', 'coreteam.rst']
42 add_module_names = True
43
44 pygments_style = 'sphinx'
45
46 import sphinx_rtd_theme
47 html_theme = 'sphinx_rtd_theme'
48 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
49
50 # Add any paths that contain custom static files (such as style sheets) here,
51 # relative to this directory. They are copied after the builtin static files,
52 # so a file named "default.css" will overwrite the builtin "default.css".
53 html_static_path = ['_static']
54
55 html_use_smartypants = False
56 html_show_sphinx = False
57
58 html_context = dict(
59 hy_descriptive_version = hy_descriptive_version)
60
61 highlight_language = 'clojure'
62
63 intersphinx_mapping = dict(
64 py = ('https://docs.python.org/3/', None),
65 py3_10 = ('https://docs.python.org/3.10/', None),
66 hyrule = ('https://hyrule.readthedocs.io/en/master/', None))
67 # ** Generate Cheatsheet
68 import json
69 from pathlib import Path
70 from itertools import zip_longest
71
72 def refize(spec):
73 role = ':hy:func:'
74 if isinstance(spec, dict):
75 _name = spec['name']
76 uri = spec['uri']
77 if spec.get('internal'):
78 role = ':ref:'
79 else:
80 uri = spec
81 _name = str.split(uri, '.')[-1]
82 return '{}`{} <{}>`'.format(role, _name, uri)
83
84
85 def format_refs(refs, indent):
86 args = [iter(map(refize, refs))]
87 ref_groups = zip_longest(*args, fillvalue="")
88 return str.join(
89 ' \\\n' + ' ' * (indent + 3),
90 [str.join(' ', ref_group) for ref_group in ref_groups],
91 )
92
93
94 def format_row(category, divider_loc):
95 return '{title: <{width}} | {methods}'.format(
96 width=divider_loc,
97 title=category['name'],
98 methods=format_refs(category['methods'], divider_loc)
99 )
100
101
102 def format_table(table_spec):
103 table_name = table_spec['name']
104 categories = table_spec['categories']
105 longest_cat_name = max(len(category['name']) for category in categories)
106 table = [
107 table_name,
108 '-' * len(table_name),
109 '',
110 '=' * longest_cat_name + ' ' + '=' * 25,
111 *(format_row(category, longest_cat_name) for category in categories),
112 '=' * longest_cat_name + ' ' + '=' * 25,
113 ''
114 ]
115 return '\n'.join(table)
116
117
118 # Modifications to the cheatsheet should be added in `cheatsheet.json`
119 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())
120 cheatsheet = [
121 '..',
122 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',
123 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',
124 '',
125 '.. _cheatsheet:',
126 '',
127 'Cheatsheet',
128 '==========',
129 '',
130 *map(format_table, cheatsheet_spec),
131 ]
132 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet))
133
134
135 # ** Sphinx App Setup
136
137
138 def setup(app):
139 app.add_css_file('overrides.css')
140
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -56,7 +56,9 @@
html_show_sphinx = False
html_context = dict(
- hy_descriptive_version = hy_descriptive_version)
+ hy_descriptive_version = hy_descriptive_version,
+ has_active_alpha = True,
+)
highlight_language = 'clojure'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -56,7 +56,9 @@\n html_show_sphinx = False\n \n html_context = dict(\n- hy_descriptive_version = hy_descriptive_version)\n+ hy_descriptive_version = hy_descriptive_version,\n+ has_active_alpha = True,\n+)\n \n highlight_language = 'clojure'\n", "issue": "Add header notice to \"stable\" line documentation to point users to the alpha cycle documentation\nI was reading documentation and noticed that hy.contrib.walk is mentioned there:\r\nhttps://docs.hylang.org/en/stable/contrib/walk.html\r\n\r\nhowever it appears that hy.contrib.walk file is no longer on the master branch. \r\nhttps://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy\r\n\r\nis it a bug in documentation or I'm missing something? \r\n\r\n\n", "before_files": [{"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = True\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None),\n py3_10 = ('https://docs.python.org/3.10/', None),\n hyrule = ('https://hyrule.readthedocs.io/en/master/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max(len(category['name']) for category in categories)\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]} | 2,012 | 91 |
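A note on the hy-2220 change above: the patch only sets a flag in `html_context`; the banner that points readers at the alpha-cycle docs is presumably rendered by a theme or template override that is not part of this record. The patched setting, with that assumption spelled out as a comment:

```python
html_context = dict(
    hy_descriptive_version=hy_descriptive_version,
    has_active_alpha=True,  # assumed: consumed by a docs template to show the alpha-docs notice
)
```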
gh_patches_debug_27568 | rasdani/github-patches | git_diff | spack__spack-18325 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries
It looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct:
```
def setup_build_environment(self, env):
env.set('AUTO_RPATH', 'true')
```
</issue>
<code>
[start of var/spack/repos/builtin/packages/py-lxml/package.py]
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyLxml(PythonPackage):
10 """lxml is the most feature-rich and easy-to-use library for processing
11 XML and HTML in the Python language."""
12
13 homepage = "http://lxml.de/"
14 url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz"
15
16 version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')
17 version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')
18 version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')
19 version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')
20 version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')
21
22 variant('html5', default=False, description='Enable html5lib backend')
23 variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')
24 variant('cssselect', default=False, description='Enable cssselect module')
25
26 depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))
27 depends_on('py-setuptools', type='build')
28 depends_on('libxml2', type=('build', 'run'))
29 depends_on('libxslt', type=('build', 'run'))
30 depends_on('py-html5lib', when='+html5', type=('build', 'run'))
31 depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))
32 depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))
33
[end of var/spack/repos/builtin/packages/py-lxml/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py
--- a/var/spack/repos/builtin/packages/py-lxml/package.py
+++ b/var/spack/repos/builtin/packages/py-lxml/package.py
@@ -13,6 +13,7 @@
homepage = "http://lxml.de/"
url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz"
+ version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')
version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')
version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')
version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')
@@ -25,8 +26,8 @@
depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
- depends_on('libxml2', type=('build', 'run'))
- depends_on('libxslt', type=('build', 'run'))
+ depends_on('libxml2', type=('build', 'link', 'run'))
+ depends_on('libxslt', type=('build', 'link', 'run'))
depends_on('py-html5lib', when='+html5', type=('build', 'run'))
depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))
depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py\n--- a/var/spack/repos/builtin/packages/py-lxml/package.py\n+++ b/var/spack/repos/builtin/packages/py-lxml/package.py\n@@ -13,6 +13,7 @@\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n \n+ version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n@@ -25,8 +26,8 @@\n \n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n- depends_on('libxml2', type=('build', 'run'))\n- depends_on('libxslt', type=('build', 'run'))\n+ depends_on('libxml2', type=('build', 'link', 'run'))\n+ depends_on('libxslt', type=('build', 'link', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "issue": "Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries\nIt looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct:\r\n\r\n```\r\n def setup_build_environment(self, env):\r\n env.set('AUTO_RPATH', 'true')\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyLxml(PythonPackage):\n \"\"\"lxml is the most feature-rich and easy-to-use library for processing\n XML and HTML in the Python language.\"\"\"\n\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')\n version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')\n\n variant('html5', default=False, description='Enable html5lib backend')\n variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')\n variant('cssselect', default=False, description='Enable cssselect module')\n\n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n depends_on('libxml2', type=('build', 'run'))\n depends_on('libxslt', type=('build', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-lxml/package.py"}]} | 1,380 | 588 |
gh_patches_debug_1252 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4762 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When too many requests come simultaneously, mitmdump called an error and quited [ValueError: too many file descriptors in select()]
#### Problem Description
A clear and concise description of what the bug is.
When too many requests come simultaneously, mitmdump called an error and quited.
Traceback (most recent call last):
File "mitmdump", line 3, in <module>
File "mitmproxy\tools\main.py", line 147, in mitmdump
File "mitmproxy\tools\main.py", line 114, in run
File "mitmproxy\master.py", line 76, in run
File "mitmproxy\master.py", line 59, in run_loop
File "mitmproxy\master.py", line 95, in shutdown
File "asyncio\base_events.py", line 629, in run_until_complete
File "asyncio\base_events.py", line 596, in run_forever
File "asyncio\base_events.py", line 1854, in _run_once
File "selectors.py", line 324, in select
File "selectors.py", line 315, in _select
ValueError: too many file descriptors in select()
[77436] Failed to execute script 'mitmdump' due to unhandled exception!
I googled the error message, and found the following answer. Don't know if it's related.
https://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select
#### Steps to reproduce the behavior:
1. I use the following command
`mitmdump.exe -p 8080 --anticomp -q -s "d:\redirect-router.py"`
In the script, I re-write the host for a specific URL
2.
3.
#### System Information
Paste the output of "mitmproxy --version" here.
mitmproxy --version
Mitmproxy: 7.0.2 binary
Python: 3.9.6
OpenSSL: OpenSSL 1.1.1k 25 Mar 2021
Platform: Windows-10-10.0.18363-SP0
</issue>
<code>
[start of mitmproxy/__init__.py]
1 import asyncio
2 import sys
3
4 if sys.platform == 'win32':
5 # workaround for
6 # https://github.com/tornadoweb/tornado/issues/2751
7 # https://www.tornadoweb.org/en/stable/index.html#installation
8 # (copied multiple times in the codebase, please remove all occurrences)
9 asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
10
[end of mitmproxy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py
--- a/mitmproxy/__init__.py
+++ b/mitmproxy/__init__.py
@@ -1,9 +0,0 @@
-import asyncio
-import sys
-
-if sys.platform == 'win32':
- # workaround for
- # https://github.com/tornadoweb/tornado/issues/2751
- # https://www.tornadoweb.org/en/stable/index.html#installation
- # (copied multiple times in the codebase, please remove all occurrences)
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
| {"golden_diff": "diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py\n--- a/mitmproxy/__init__.py\n+++ b/mitmproxy/__init__.py\n@@ -1,9 +0,0 @@\n-import asyncio\n-import sys\n-\n-if sys.platform == 'win32':\n- # workaround for\n- # https://github.com/tornadoweb/tornado/issues/2751\n- # https://www.tornadoweb.org/en/stable/index.html#installation\n- # (copied multiple times in the codebase, please remove all occurrences)\n- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "issue": "When too many requests come simultaneously, mitmdump called an error and quited [ValueError: too many file descriptors in select()]\n#### Problem Description\r\nA clear and concise description of what the bug is.\r\nWhen too many requests come simultaneously, mitmdump called an error and quited.\r\nTraceback (most recent call last):\r\n File \"mitmdump\", line 3, in <module>\r\n File \"mitmproxy\\tools\\main.py\", line 147, in mitmdump\r\n File \"mitmproxy\\tools\\main.py\", line 114, in run\r\n File \"mitmproxy\\master.py\", line 76, in run\r\n File \"mitmproxy\\master.py\", line 59, in run_loop\r\n File \"mitmproxy\\master.py\", line 95, in shutdown\r\n File \"asyncio\\base_events.py\", line 629, in run_until_complete\r\n File \"asyncio\\base_events.py\", line 596, in run_forever\r\n File \"asyncio\\base_events.py\", line 1854, in _run_once\r\n File \"selectors.py\", line 324, in select\r\n File \"selectors.py\", line 315, in _select\r\nValueError: too many file descriptors in select()\r\n[77436] Failed to execute script 'mitmdump' due to unhandled exception!\r\n\r\nI googled the error message, and found the following answer. Don't know if it's related.\r\nhttps://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select\r\n\r\n#### Steps to reproduce the behavior:\r\n1. I use the following command\r\n`mitmdump.exe -p 8080 --anticomp -q -s \"d:\\redirect-router.py\"`\r\nIn the script, I re-write the host for a specific URL\r\n2. \r\n3. \r\n\r\n#### System Information\r\nPaste the output of \"mitmproxy --version\" here.\r\nmitmproxy --version\r\nMitmproxy: 7.0.2 binary\r\nPython: 3.9.6\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: Windows-10-10.0.18363-SP0\n", "before_files": [{"content": "import asyncio\nimport sys\n\nif sys.platform == 'win32':\n # workaround for\n # https://github.com/tornadoweb/tornado/issues/2751\n # https://www.tornadoweb.org/en/stable/index.html#installation\n # (copied multiple times in the codebase, please remove all occurrences)\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "path": "mitmproxy/__init__.py"}]} | 1,150 | 145 |
gh_patches_debug_23336 | rasdani/github-patches | git_diff | pytorch__ignite-286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tqm_logger: metric_names is currently not optional
Hi,
https://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75
This line should be modified to make `metric_names` optional. Here is a suggestion:
```
if metric_names is not None and not isinstance(metric_names, list):
raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
```
Thanks
</issue>
<code>
[start of ignite/contrib/handlers/tqdm_logger.py]
1 try:
2 from tqdm import tqdm
3 except ImportError:
4 raise RuntimeError("This contrib module requires tqdm to be installed")
5
6 from ignite.engine import Events
7
8
9 class ProgressBar:
10 """
11 TQDM progress bar handler to log training progress and computed metrics.
12
13 Examples:
14
15 Create a progress bar that shows you some metrics as they are computed,
16 by simply attaching the progress bar object to your engine.
17
18 .. code-block:: python
19
20 pbar = ProgressBar()
21 pbar.attach(trainer, ['loss'])
22
23 Note:
24 When adding attaching the progress bar to an engine, it is recommend that you replace
25 every print operation in the engine's handlers triggered every iteration with
26 ``pbar.log_message`` to guarantee the correct format of the stdout.
27 """
28
29 def __init__(self):
30 self.pbar = None
31
32 def _reset(self, engine):
33 self.pbar = tqdm(
34 total=len(engine.state.dataloader),
35 leave=False,
36 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')
37
38 def _close(self, engine):
39 self.pbar.close()
40 self.pbar = None
41
42 def _update(self, engine, metric_names=None):
43 if self.pbar is None:
44 self._reset(engine)
45
46 self.pbar.set_description('Epoch {}'.format(engine.state.epoch))
47
48 if metric_names is not None:
49 if not all(metric in engine.state.metrics for metric in metric_names):
50 raise KeyError("metrics not found in engine.state.metrics")
51
52 metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}
53 self.pbar.set_postfix(**metrics)
54
55 self.pbar.update()
56
57 @staticmethod
58 def log_message(message):
59 """
60 Logs a message, preserving the progress bar correct output format
61
62 Args:
63 message (str): string you wish to log
64 """
65 tqdm.write(message)
66
67 def attach(self, engine, metric_names=None):
68 """
69 Attaches the progress bar to an engine object
70
71 Args:
72 engine (Engine): engine object
73 metric_names (list): (Optional) list of the metrics names to log as the bar progresses
74 """
75 if not isinstance(metric_names, list):
76 raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
77
78 engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)
79 engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)
80
[end of ignite/contrib/handlers/tqdm_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -47,6 +47,7 @@
if metric_names is not None:
if not all(metric in engine.state.metrics for metric in metric_names):
+ self._close(engine)
raise KeyError("metrics not found in engine.state.metrics")
metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}
@@ -72,7 +73,7 @@
engine (Engine): engine object
metric_names (list): (Optional) list of the metrics names to log as the bar progresses
"""
- if not isinstance(metric_names, list):
+ if metric_names is not None and not isinstance(metric_names, list):
raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -47,6 +47,7 @@\n \n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n+ self._close(engine)\n raise KeyError(\"metrics not found in engine.state.metrics\")\n \n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n@@ -72,7 +73,7 @@\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n- if not isinstance(metric_names, list):\n+ if metric_names is not None and not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n \n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n", "issue": "tqm_logger: metric_names is currently not optional \nHi,\r\n\r\nhttps://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75\r\nThis line should be modified to make `metric_names` optional. Here is a suggestion:\r\n```\r\nif metric_names is not None and not isinstance(metric_names, list):\r\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\r\n```\r\n\r\nThanks\n", "before_files": [{"content": "try:\n from tqdm import tqdm\nexcept ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed\")\n\nfrom ignite.engine import Events\n\n\nclass ProgressBar:\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Examples:\n\n Create a progress bar that shows you some metrics as they are computed,\n by simply attaching the progress bar object to your engine.\n\n .. 
code-block:: python\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n \"\"\"\n\n def __init__(self):\n self.pbar = None\n\n def _reset(self, engine):\n self.pbar = tqdm(\n total=len(engine.state.dataloader),\n leave=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n def _update(self, engine, metric_names=None):\n if self.pbar is None:\n self._reset(engine)\n\n self.pbar.set_description('Epoch {}'.format(engine.state.epoch))\n\n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n raise KeyError(\"metrics not found in engine.state.metrics\")\n\n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n self.pbar.set_postfix(**metrics)\n\n self.pbar.update()\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format\n\n Args:\n message (str): string you wish to log\n \"\"\"\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None):\n \"\"\"\n Attaches the progress bar to an engine object\n\n Args:\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n if not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n\n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 1,352 | 245 |
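Usage implication of the tqdm_logger patch above, assuming the public import path `ignite.contrib.handlers.ProgressBar` and a `trainer` Engine created elsewhere, as in ignite's own docs:

```python
from ignite.contrib.handlers import ProgressBar

pbar = ProgressBar()
pbar.attach(trainer)              # now valid: no metric_names, the bar only tracks progress
# pbar.attach(trainer, ['loss'])  # passing a list still works as before
```

The diff also closes the bar before raising `KeyError`, so a failed metrics lookup no longer leaves a dangling tqdm instance.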
gh_patches_debug_60627 | rasdani/github-patches | git_diff | CTPUG__wafer-111 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error
As seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with
"Specifying both 'fields' and 'form_class' is not permitted."
ImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted.
</issue>
<code>
[start of wafer/pages/views.py]
1 from django.http import Http404
2 from django.core.exceptions import PermissionDenied
3 from django.views.generic import DetailView, TemplateView, UpdateView
4
5 from wafer.pages.models import Page
6 from wafer.pages.forms import PageForm
7
8
9 class ShowPage(DetailView):
10 template_name = 'wafer.pages/page.html'
11 model = Page
12
13
14 class EditPage(UpdateView):
15 template_name = 'wafer.pages/page_form.html'
16 model = Page
17 form_class = PageForm
18 fields = ['name', 'content']
19
20
21 def slug(request, url):
22 """Look up a page by url (which is a tree of slugs)"""
23 page = None
24 for slug in url.split('/'):
25 if not slug:
26 continue
27 try:
28 page = Page.objects.get(slug=slug, parent=page)
29 except Page.DoesNotExist:
30 raise Http404
31
32 if page is None:
33 try:
34 page = Page.objects.get(slug='index')
35 except Page.DoesNotExist:
36 return TemplateView.as_view(
37 template_name='wafer/index.html')(request)
38
39 if 'edit' in request.GET.keys():
40 if not request.user.has_perm('pages.change_page'):
41 raise PermissionDenied
42 return EditPage.as_view()(request, pk=page.id)
43
44 return ShowPage.as_view()(request, pk=page.id)
45
[end of wafer/pages/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wafer/pages/views.py b/wafer/pages/views.py
--- a/wafer/pages/views.py
+++ b/wafer/pages/views.py
@@ -15,7 +15,6 @@
template_name = 'wafer.pages/page_form.html'
model = Page
form_class = PageForm
- fields = ['name', 'content']
def slug(request, url):
| {"golden_diff": "diff --git a/wafer/pages/views.py b/wafer/pages/views.py\n--- a/wafer/pages/views.py\n+++ b/wafer/pages/views.py\n@@ -15,7 +15,6 @@\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n- fields = ['name', 'content']\n \n \n def slug(request, url):\n", "issue": "Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error\nAs seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with\n\n\"Specifying both 'fields' and 'form_class' is not permitted.\"\nImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted.\n\n", "before_files": [{"content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n fields = ['name', 'content']\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py"}]} | 986 | 90 |
gh_patches_debug_20149 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User account deletion doesn't remove resources from SOLR index
**Description of the bug**
Deleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover.
Steps to reproduce the bug:
1. make a new user account
2. add a resource and make it discoverable
3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel)
4. See that the resource listing persists on the Discover search page
**Expected behavior**
User account deletion should remove the user's resources from the SOLR index
**Additional information**
HS v 2.9.2
</issue>
<code>
[start of hs_core/hydro_realtime_signal_processor.py]
1 import logging
2
3 from django.conf import settings
4 from django.db import models
5 from hs_core.models import Date, BaseResource
6 from hs_access_control.models import ResourceAccess
7 from haystack.exceptions import NotHandled
8 from haystack.signals import BaseSignalProcessor
9
10 logger = logging.getLogger(__name__)
11
12
13 class HydroRealtimeSignalProcessor(BaseSignalProcessor):
14 """
15 Notes:
16 1. We assume everytime metadata is updated the modified datetime is updated
17 2. ResourceAccess does not update the modified datetime (it is not scientific metadata)
18 """
19
20 def setup(self):
21 if not getattr(settings, "DISABLE_HAYSTACK", False):
22 models.signals.post_save.connect(self.handle_update, sender=Date)
23 models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)
24
25 def teardown(self):
26 if not getattr(settings, "DISABLE_HAYSTACK", False):
27 models.signals.post_save.disconnect(self.handle_update, sender=Date)
28 models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)
29
30 def handle_update(self, sender, instance, **kwargs):
31 try:
32 # resolve the BaseResource corresponding to the metadata element.
33 newbase = instance.metadata.resource
34 index_resource(self, newbase)
35 except Exception as e:
36 logger.exception("{} exception: {}".format(type(instance), str(e)))
37
38 def handle_access(self, sender, instance, **kwargs):
39 try:
40 newbase = instance.resource
41 index_resource(self, newbase)
42 except Exception as e:
43 logger.exception("{} exception: {}".format(type(instance), str(e)))
44
45
46 def index_resource(signal_processor, instance: BaseResource):
47 if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):
48 # work around for failure of super(BaseResource, instance) to work properly.
49 # this always succeeds because this is a post-save object action.
50 newbase = BaseResource.objects.get(pk=instance.pk)
51 newsender = BaseResource
52 using_backends = signal_processor.connection_router.for_write(instance=newbase)
53 for using in using_backends:
54 # if object is public/discoverable or becoming public/discoverable, index it
55 # test whether the object should be exposed.
56 if instance.show_in_discover:
57 try:
58 index = signal_processor.connections[using].get_unified_index().get_index(newsender)
59 index.update_object(newbase, using=using)
60 except NotHandled:
61 logger.exception("Failure: changes to %s with short_id %s not added to Solr Index.",
62 str(type(instance)), newbase.short_id)
63
64 # if object is private or becoming private, delete from index
65 else: # not to be shown in discover
66 try:
67 index = signal_processor.connections[using].get_unified_index().get_index(newsender)
68 index.remove_object(newbase, using=using)
69 except NotHandled:
70 logger.exception("Failure: delete of %s with short_id %s failed.",
71 str(type(instance)), newbase.short_id)
72
[end of hs_core/hydro_realtime_signal_processor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py
--- a/hs_core/hydro_realtime_signal_processor.py
+++ b/hs_core/hydro_realtime_signal_processor.py
@@ -21,11 +21,13 @@
if not getattr(settings, "DISABLE_HAYSTACK", False):
models.signals.post_save.connect(self.handle_update, sender=Date)
models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)
+ models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)
def teardown(self):
if not getattr(settings, "DISABLE_HAYSTACK", False):
models.signals.post_save.disconnect(self.handle_update, sender=Date)
models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)
+ models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)
def handle_update(self, sender, instance, **kwargs):
try:
| {"golden_diff": "diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py\n--- a/hs_core/hydro_realtime_signal_processor.py\n+++ b/hs_core/hydro_realtime_signal_processor.py\n@@ -21,11 +21,13 @@\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)\n \n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)\n \n def handle_update(self, sender, instance, **kwargs):\n try:\n", "issue": "User account deletion doesn't remove resources from SOLR index\n**Description of the bug**\r\nDeleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover.\r\n\r\nSteps to reproduce the bug:\r\n1. make a new user account\r\n2. add a resource and make it discoverable\r\n3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel)\r\n4. See that the resource listing persists on the Discover search page\r\n\r\n**Expected behavior**\r\nUser account deletion should remove the user's resources from the SOLR index\r\n\r\n**Additional information**\r\nHS v 2.9.2\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom hs_core.models import Date, BaseResource\nfrom hs_access_control.models import ResourceAccess\nfrom haystack.exceptions import NotHandled\nfrom haystack.signals import BaseSignalProcessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass HydroRealtimeSignalProcessor(BaseSignalProcessor):\n \"\"\"\n Notes:\n 1. We assume everytime metadata is updated the modified datetime is updated\n 2. 
ResourceAccess does not update the modified datetime (it is not scientific metadata)\n \"\"\"\n\n def setup(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n\n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n\n def handle_update(self, sender, instance, **kwargs):\n try:\n # resolve the BaseResource corresponding to the metadata element.\n newbase = instance.metadata.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n def handle_access(self, sender, instance, **kwargs):\n try:\n newbase = instance.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n\ndef index_resource(signal_processor, instance: BaseResource):\n if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):\n # work around for failure of super(BaseResource, instance) to work properly.\n # this always succeeds because this is a post-save object action.\n newbase = BaseResource.objects.get(pk=instance.pk)\n newsender = BaseResource\n using_backends = signal_processor.connection_router.for_write(instance=newbase)\n for using in using_backends:\n # if object is public/discoverable or becoming public/discoverable, index it\n # test whether the object should be exposed.\n if instance.show_in_discover:\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.update_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: changes to %s with short_id %s not added to Solr Index.\",\n str(type(instance)), newbase.short_id)\n\n # if object is private or becoming private, delete from index\n else: # not to be shown in discover\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.remove_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: delete of %s with short_id %s failed.\",\n str(type(instance)), newbase.short_id)\n", "path": "hs_core/hydro_realtime_signal_processor.py"}]} | 1,463 | 208 |
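On the hydroshare diff above: only the signal wiring is shown, so the body of the new `handle_delete` handler is not visible in this record. A hypothetical shape for it, modelled on the existing `index_resource` helper in the same file (names and error handling are guesses, not the real patch):

```python
def handle_delete(self, sender, instance, **kwargs):
    """Drop a just-deleted BaseResource from the SOLR index."""
    try:
        for using in self.connection_router.for_write(instance=instance):
            index = self.connections[using].get_unified_index().get_index(BaseResource)
            index.remove_object(instance, using=using)
    except Exception as e:
        logger.exception("%s exception: %s", type(instance), str(e))
```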
gh_patches_debug_8403 | rasdani/github-patches | git_diff | pypa__pip-10507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
21.3 regression with legacy resolver
Assuming the following project with an empty `pyproject.toml` and the following `setup.cfg`:
```ini
[metadata]
name = pkgb
version = 1.0
[options]
install_requires =
wrapt
```
We get the following stack trace, using pip main branch, today:
```console
$ pip install --use-deprecated=legacy-resolver -e ./pkgb
Obtaining file:///home/me/tmp/brol/pkgb
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing wheel metadata ... done
Requirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1)
ERROR: Exception:
Traceback (most recent call last):
File "/home/me/pip/src/pip/_internal/cli/base_command.py", line 179, in exc_logging_wrapper
status = run_func(*args)
File "/home/me/pip/src/pip/_internal/cli/req_command.py", line 203, in wrapper
return func(self, options, args)
File "/home/me/pip/src/pip/_internal/commands/install.py", line 334, in run
requirement_set = resolver.resolve(
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 181, in resolve
discovered_reqs.extend(self._resolve_one(requirement_set, req))
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 382, in _resolve_one
_check_dist_requires_python(
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 75, in _check_dist_requires_python
requires_python = str(dist.requires_python)
File "/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py", line 2816, in __getattr__
return getattr(self._provider, attr)
AttributeError: 'PathMetadata' object has no attribute 'requires_python'
```
</issue>
<code>
[start of src/pip/_internal/distributions/installed.py]
1 from pip._internal.distributions.base import AbstractDistribution
2 from pip._internal.index.package_finder import PackageFinder
3 from pip._internal.metadata import BaseDistribution
4
5
6 class InstalledDistribution(AbstractDistribution):
7 """Represents an installed package.
8
9 This does not need any preparation as the required information has already
10 been computed.
11 """
12
13 def get_metadata_distribution(self) -> BaseDistribution:
14 assert self.req.satisfied_by is not None, "not actually installed"
15 return self.req.satisfied_by
16
17 def prepare_distribution_metadata(
18 self, finder: PackageFinder, build_isolation: bool
19 ) -> None:
20 pass
21
[end of src/pip/_internal/distributions/installed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py
--- a/src/pip/_internal/distributions/installed.py
+++ b/src/pip/_internal/distributions/installed.py
@@ -11,8 +11,10 @@
"""
def get_metadata_distribution(self) -> BaseDistribution:
+ from pip._internal.metadata.pkg_resources import Distribution as _Dist
+
assert self.req.satisfied_by is not None, "not actually installed"
- return self.req.satisfied_by
+ return _Dist(self.req.satisfied_by)
def prepare_distribution_metadata(
self, finder: PackageFinder, build_isolation: bool
| {"golden_diff": "diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py\n--- a/src/pip/_internal/distributions/installed.py\n+++ b/src/pip/_internal/distributions/installed.py\n@@ -11,8 +11,10 @@\n \"\"\"\n \n def get_metadata_distribution(self) -> BaseDistribution:\n+ from pip._internal.metadata.pkg_resources import Distribution as _Dist\n+\n assert self.req.satisfied_by is not None, \"not actually installed\"\n- return self.req.satisfied_by\n+ return _Dist(self.req.satisfied_by)\n \n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n", "issue": "21.3 regression with legacy resolver\nAssuming the following project with an empty `pyproject.toml` and the following `setup.cfg`:\r\n\r\n```ini\r\n[metadata]\r\nname = pkgb\r\nversion = 1.0\r\n\r\n[options]\r\ninstall_requires =\r\n wrapt\r\n```\r\n\r\nWe get the following stack trace, using pip main branch, today:\r\n\r\n```console\r\n$ pip install --use-deprecated=legacy-resolver -e ./pkgb\r\nObtaining file:///home/me/tmp/brol/pkgb\r\n Installing build dependencies ... done\r\n Getting requirements to build wheel ... done\r\n Preparing wheel metadata ... done\r\nRequirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1)\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/me/pip/src/pip/_internal/cli/base_command.py\", line 179, in exc_logging_wrapper\r\n status = run_func(*args)\r\n File \"/home/me/pip/src/pip/_internal/cli/req_command.py\", line 203, in wrapper\r\n return func(self, options, args)\r\n File \"/home/me/pip/src/pip/_internal/commands/install.py\", line 334, in run\r\n requirement_set = resolver.resolve(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 181, in resolve\r\n discovered_reqs.extend(self._resolve_one(requirement_set, req))\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 382, in _resolve_one\r\n _check_dist_requires_python(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 75, in _check_dist_requires_python\r\n requires_python = str(dist.requires_python)\r\n File \"/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py\", line 2816, in __getattr__\r\n return getattr(self._provider, attr)\r\nAttributeError: 'PathMetadata' object has no attribute 'requires_python'\r\n```\r\n\n", "before_files": [{"content": "from pip._internal.distributions.base import AbstractDistribution\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution\n\n\nclass InstalledDistribution(AbstractDistribution):\n \"\"\"Represents an installed package.\n\n This does not need any preparation as the required information has already\n been computed.\n \"\"\"\n\n def get_metadata_distribution(self) -> BaseDistribution:\n assert self.req.satisfied_by is not None, \"not actually installed\"\n return self.req.satisfied_by\n\n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n ) -> None:\n pass\n", "path": "src/pip/_internal/distributions/installed.py"}]} | 1,196 | 153 |
gh_patches_debug_19762 | rasdani/github-patches | git_diff | scrapy__scrapy-4676 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
startproject only allows creating the project on an existing folder if the path is specified with an invalid module name
```
(venv) [adrian@afonsox temporal]$ mkdir existing_folder
(venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder
Error: Module 'existing_folder' already exists
(venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder .
New Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in:
/home/adrian/temporal/existing_folder
You can start your first spider with:
cd .
scrapy genspider example example.com
```
</issue>
<code>
[start of scrapy/commands/startproject.py]
1 import re
2 import os
3 import string
4 from importlib import import_module
5 from os.path import join, exists, abspath
6 from shutil import ignore_patterns, move, copy2, copystat
7 from stat import S_IWUSR as OWNER_WRITE_PERMISSION
8
9 import scrapy
10 from scrapy.commands import ScrapyCommand
11 from scrapy.utils.template import render_templatefile, string_camelcase
12 from scrapy.exceptions import UsageError
13
14
15 TEMPLATES_TO_RENDER = (
16 ('scrapy.cfg',),
17 ('${project_name}', 'settings.py.tmpl'),
18 ('${project_name}', 'items.py.tmpl'),
19 ('${project_name}', 'pipelines.py.tmpl'),
20 ('${project_name}', 'middlewares.py.tmpl'),
21 )
22
23 IGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')
24
25
26 def _make_writable(path):
27 current_permissions = os.stat(path).st_mode
28 os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)
29
30
31 class Command(ScrapyCommand):
32
33 requires_project = False
34 default_settings = {'LOG_ENABLED': False,
35 'SPIDER_LOADER_WARN_ONLY': True}
36
37 def syntax(self):
38 return "<project_name> [project_dir]"
39
40 def short_desc(self):
41 return "Create new project"
42
43 def _is_valid_name(self, project_name):
44 def _module_exists(module_name):
45 try:
46 import_module(module_name)
47 return True
48 except ImportError:
49 return False
50
51 if not re.search(r'^[_a-zA-Z]\w*$', project_name):
52 print('Error: Project names must begin with a letter and contain'
53 ' only\nletters, numbers and underscores')
54 elif _module_exists(project_name):
55 print(f'Error: Module {project_name!r} already exists')
56 else:
57 return True
58 return False
59
60 def _copytree(self, src, dst):
61 """
62 Since the original function always creates the directory, to resolve
63 the issue a new function had to be created. It's a simple copy and
64 was reduced for this case.
65
66 More info at:
67 https://github.com/scrapy/scrapy/pull/2005
68 """
69 ignore = IGNORE
70 names = os.listdir(src)
71 ignored_names = ignore(src, names)
72
73 if not os.path.exists(dst):
74 os.makedirs(dst)
75
76 for name in names:
77 if name in ignored_names:
78 continue
79
80 srcname = os.path.join(src, name)
81 dstname = os.path.join(dst, name)
82 if os.path.isdir(srcname):
83 self._copytree(srcname, dstname)
84 else:
85 copy2(srcname, dstname)
86 _make_writable(dstname)
87
88 copystat(src, dst)
89 _make_writable(dst)
90
91 def run(self, args, opts):
92 if len(args) not in (1, 2):
93 raise UsageError()
94
95 project_name = args[0]
96 project_dir = args[0]
97
98 if len(args) == 2:
99 project_dir = args[1]
100
101 if exists(join(project_dir, 'scrapy.cfg')):
102 self.exitcode = 1
103 print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}')
104 return
105
106 if not self._is_valid_name(project_name):
107 self.exitcode = 1
108 return
109
110 self._copytree(self.templates_dir, abspath(project_dir))
111 move(join(project_dir, 'module'), join(project_dir, project_name))
112 for paths in TEMPLATES_TO_RENDER:
113 path = join(*paths)
114 tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))
115 render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))
116 print(f"New Scrapy project '{project_name}', using template directory "
117 f"'{self.templates_dir}', created in:")
118 print(f" {abspath(project_dir)}\n")
119 print("You can start your first spider with:")
120 print(f" cd {project_dir}")
121 print(" scrapy genspider example example.com")
122
123 @property
124 def templates_dir(self):
125 return join(
126 self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),
127 'project'
128 )
129
[end of scrapy/commands/startproject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py
--- a/scrapy/commands/startproject.py
+++ b/scrapy/commands/startproject.py
@@ -1,7 +1,7 @@
import re
import os
import string
-from importlib import import_module
+from importlib.util import find_spec
from os.path import join, exists, abspath
from shutil import ignore_patterns, move, copy2, copystat
from stat import S_IWUSR as OWNER_WRITE_PERMISSION
@@ -42,11 +42,8 @@
def _is_valid_name(self, project_name):
def _module_exists(module_name):
- try:
- import_module(module_name)
- return True
- except ImportError:
- return False
+ spec = find_spec(module_name)
+ return spec is not None and spec.loader is not None
if not re.search(r'^[_a-zA-Z]\w*$', project_name):
print('Error: Project names must begin with a letter and contain'
| {"golden_diff": "diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py\n--- a/scrapy/commands/startproject.py\n+++ b/scrapy/commands/startproject.py\n@@ -1,7 +1,7 @@\n import re\n import os\n import string\n-from importlib import import_module\n+from importlib.util import find_spec\n from os.path import join, exists, abspath\n from shutil import ignore_patterns, move, copy2, copystat\n from stat import S_IWUSR as OWNER_WRITE_PERMISSION\n@@ -42,11 +42,8 @@\n \n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n- try:\n- import_module(module_name)\n- return True\n- except ImportError:\n- return False\n+ spec = find_spec(module_name)\n+ return spec is not None and spec.loader is not None\n \n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n", "issue": "startproject only allows creating the project on an existing folder if the path is specified with an invalid module name\n```\r\n(venv) [adrian@afonsox temporal]$ mkdir existing_folder\r\n(venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder\r\nError: Module 'existing_folder' already exists\r\n(venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder .\r\nNew Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in:\r\n /home/adrian/temporal/existing_folder\r\n\r\nYou can start your first spider with:\r\n cd .\r\n scrapy genspider example example.com\r\n```\n", "before_files": [{"content": "import re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\nfrom stat import S_IWUSR as OWNER_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')\n\n\ndef _make_writable(path):\n current_permissions = os.stat(path).st_mode\n os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False,\n 'SPIDER_LOADER_WARN_ONLY': True}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print(f'Error: Module {project_name!r} already exists')\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. 
It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n _make_writable(dstname)\n\n copystat(src, dst)\n _make_writable(dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}')\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))\n print(f\"New Scrapy project '{project_name}', using template directory \"\n f\"'{self.templates_dir}', created in:\")\n print(f\" {abspath(project_dir)}\\n\")\n print(\"You can start your first spider with:\")\n print(f\" cd {project_dir}\")\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n return join(\n self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),\n 'project'\n )\n", "path": "scrapy/commands/startproject.py"}]} | 1,909 | 228 |
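As a standalone illustration of the `find_spec`-based check adopted in the scrapy `startproject` fix above (probing whether a module is importable without actually importing it), here is a minimal sketch using only the standard library.

```python
from importlib.util import find_spec


def module_exists(module_name: str) -> bool:
    spec = find_spec(module_name)
    # A bare directory on sys.path shows up as a namespace package: it has a
    # spec but no loader, which is why the fix also checks `spec.loader`.
    return spec is not None and spec.loader is not None


print(module_exists("json"))             # True: a real importable module
print(module_exists("existing_folder"))  # False even if only an empty folder of that name exists
```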
gh_patches_debug_25061 | rasdani/github-patches | git_diff | elastic__apm-agent-python-813 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flower hangs from version 5.4.0.
**Describe the bug**:
Flower hangs (no response over the HTTP connection in the browser) when elastic-apm version >= 5.4.0 is installed
**To Reproduce**
1. pip install elastic-apm==5.4.0
2. restart flower and try to access
**Environment (please complete the following information)**
- OS: Ubuntu 18.04
- Python version: 3.6
- Framework and version: Django 2.2
- APM Server version: NA
- Agent version: 5.4.0+
</issue>
<code>
[start of elasticapm/instrumentation/packages/tornado.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """
31 Instrumentation for Tornado
32 """
33 import elasticapm
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
36 from elasticapm.traces import capture_span
37 from elasticapm.utils.disttracing import TraceParent
38
39
40 class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
41 name = "tornado_request_execute"
42 creates_transactions = True
43 instrument_list = [("tornado.web", "RequestHandler._execute")]
44
45 async def call(self, module, method, wrapped, instance, args, kwargs):
46 # Late import to avoid ImportErrors
47 from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
48
49 request = instance.request
50 trace_parent = TraceParent.from_headers(request.headers)
51 client = instance.application.elasticapm_client
52 client.begin_transaction("request", trace_parent=trace_parent)
53 elasticapm.set_context(
54 lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request"
55 )
56 # TODO: Can we somehow incorporate the routing rule itself here?
57 elasticapm.set_transaction_name("{} {}".format(request.method, type(instance).__name__), override=False)
58
59 ret = await wrapped(*args, **kwargs)
60
61 elasticapm.set_context(
62 lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), "response"
63 )
64 result = "HTTP {}xx".format(instance.get_status() // 100)
65 elasticapm.set_transaction_result(result, override=False)
66 client.end_transaction()
67
68 return ret
69
70
71 class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
72 name = "tornado_handle_request_exception"
73
74 instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
75
76 def call(self, module, method, wrapped, instance, args, kwargs):
77
78 # Late import to avoid ImportErrors
79 from tornado.web import Finish, HTTPError
80 from elasticapm.contrib.tornado.utils import get_data_from_request
81
82 e = args[0]
83 if isinstance(e, Finish):
84 # Not an error; Finish is an exception that ends a request without an error response
85 return wrapped(*args, **kwargs)
86
87 client = instance.application.elasticapm_client
88 request = instance.request
89 client.capture_exception(
90 context={"request": get_data_from_request(instance, request, client.config, constants.ERROR)}
91 )
92 if isinstance(e, HTTPError):
93 elasticapm.set_transaction_result("HTTP {}xx".format(int(e.status_code / 100)), override=False)
94 elasticapm.set_context({"status_code": e.status_code}, "response")
95 else:
96 elasticapm.set_transaction_result("HTTP 5xx", override=False)
97 elasticapm.set_context({"status_code": 500}, "response")
98
99 return wrapped(*args, **kwargs)
100
101
102 class TornadoRenderInstrumentation(AbstractInstrumentedModule):
103 name = "tornado_render"
104
105 instrument_list = [("tornado.web", "RequestHandler.render")]
106
107 def call(self, module, method, wrapped, instance, args, kwargs):
108 if "template_name" in kwargs:
109 name = kwargs["template_name"]
110 else:
111 name = args[0]
112
113 with capture_span(name, span_type="template", span_subtype="tornado", span_action="render"):
114 return wrapped(*args, **kwargs)
115
[end of elasticapm/instrumentation/packages/tornado.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py
--- a/elasticapm/instrumentation/packages/tornado.py
+++ b/elasticapm/instrumentation/packages/tornado.py
@@ -43,6 +43,11 @@
instrument_list = [("tornado.web", "RequestHandler._execute")]
async def call(self, module, method, wrapped, instance, args, kwargs):
+ if not hasattr(instance.application, "elasticapm_client"):
+ # If tornado was instrumented but not as the main framework
+ # (i.e. in Flower), we should skip it.
+ return await wrapped(*args, **kwargs)
+
# Late import to avoid ImportErrors
from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
@@ -74,6 +79,10 @@
instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
def call(self, module, method, wrapped, instance, args, kwargs):
+ if not hasattr(instance.application, "elasticapm_client"):
+ # If tornado was instrumented but not as the main framework
+ # (i.e. in Flower), we should skip it.
+ return wrapped(*args, **kwargs)
# Late import to avoid ImportErrors
from tornado.web import Finish, HTTPError
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py\n--- a/elasticapm/instrumentation/packages/tornado.py\n+++ b/elasticapm/instrumentation/packages/tornado.py\n@@ -43,6 +43,11 @@\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n \n async def call(self, module, method, wrapped, instance, args, kwargs):\n+ if not hasattr(instance.application, \"elasticapm_client\"):\n+ # If tornado was instrumented but not as the main framework\n+ # (i.e. in Flower), we should skip it.\n+ return await wrapped(*args, **kwargs)\n+\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n \n@@ -74,6 +79,10 @@\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n+ if not hasattr(instance.application, \"elasticapm_client\"):\n+ # If tornado was instrumented but not as the main framework\n+ # (i.e. in Flower), we should skip it.\n+ return wrapped(*args, **kwargs)\n \n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n", "issue": "Flower hangs from version 5.4.0.\n**Describe the bug**: \r\nFlower hangs (no answer from http connection to browser) when a version >= 5.4.0 is installed\r\n\r\n**To Reproduce**\r\n\r\n1. pip install elastic-apm==5.4.0\r\n2. restart flower and try to access\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Ubuntu 18.04\r\n- Python version: 3.6\r\n- Framework and version: Django 2.2\r\n- APM Server version: NA\r\n- Agent version: 5.4.0+ \r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nInstrumentation for Tornado\n\"\"\"\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n\n request = instance.request\n trace_parent = TraceParent.from_headers(request.headers)\n client = instance.application.elasticapm_client\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n elasticapm.set_context(\n lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), \"request\"\n )\n # TODO: Can we somehow incorporate the routing rule itself here?\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, type(instance).__name__), override=False)\n\n ret = await wrapped(*args, **kwargs)\n\n elasticapm.set_context(\n lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), \"response\"\n )\n result = \"HTTP {}xx\".format(instance.get_status() // 100)\n elasticapm.set_transaction_result(result, override=False)\n client.end_transaction()\n\n return ret\n\n\nclass TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n\n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n from elasticapm.contrib.tornado.utils import get_data_from_request\n\n e = args[0]\n if isinstance(e, Finish):\n # Not an error; Finish is an exception that ends a request without an error response\n return wrapped(*args, **kwargs)\n\n client = instance.application.elasticapm_client\n request = instance.request\n client.capture_exception(\n context={\"request\": get_data_from_request(instance, request, client.config, constants.ERROR)}\n )\n if isinstance(e, HTTPError):\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(int(e.status_code / 100)), override=False)\n elasticapm.set_context({\"status_code\": e.status_code}, \"response\")\n else:\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n return wrapped(*args, **kwargs)\n\n\nclass TornadoRenderInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_render\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n\n def call(self, module, method, wrapped, instance, args, 
kwargs):\n if \"template_name\" in kwargs:\n name = kwargs[\"template_name\"]\n else:\n name = args[0]\n\n with capture_span(name, span_type=\"template\", span_subtype=\"tornado\", span_action=\"render\"):\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/tornado.py"}]} | 1,999 | 311 |
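The guard added by the elastic-apm fix above amounts to "skip instrumentation when the host application was not set up by the framework integration", which is the situation Flower creates. A simplified, hypothetical wrapper showing the same pattern (not the library's actual code):

```python
def instrumented_execute(wrapped, instance, args, kwargs):
    application = getattr(instance, "application", None)
    if not hasattr(application, "elasticapm_client"):
        # Tornado was instrumented but is not the main framework (e.g. Flower):
        # run the original handler untouched instead of waiting on APM setup.
        return wrapped(*args, **kwargs)
    client = application.elasticapm_client
    client.begin_transaction("request")
    try:
        return wrapped(*args, **kwargs)
    finally:
        client.end_transaction()
```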
gh_patches_debug_37011 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No specific error when dataset tries to import a non-installed module
When a dataset script tries to import a module/library that is not installed, there is no informative error message.
See:
- #1067
- #1068
Related to:
- #976
</issue>
<code>
[start of services/worker/src/worker/job_runners/config_names.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 import logging
5 from http import HTTPStatus
6 from typing import Any, List, Literal, Mapping, Optional, TypedDict, Union
7
8 from datasets import get_dataset_config_names
9 from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
10 from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
11 from libcommon.simple_cache import SplitFullName
12
13 from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
14 from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
15
16 ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "ConfigNamesError"]
17
18
19 class ConfigNamesJobRunnerError(JobRunnerError):
20 """Base class for job runner exceptions."""
21
22 def __init__(
23 self,
24 message: str,
25 status_code: HTTPStatus,
26 code: ConfigNamesJobRunnerErrorCode,
27 cause: Optional[BaseException] = None,
28 disclose_cause: bool = False,
29 ):
30 super().__init__(
31 message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
32 )
33
34
35 class EmptyDatasetError(ConfigNamesJobRunnerError):
36 """Raised when the dataset has no data."""
37
38 def __init__(self, message: str, cause: Optional[BaseException] = None):
39 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
40
41
42 class ConfigNamesError(ConfigNamesJobRunnerError):
43 """Raised when the config names could not be fetched."""
44
45 def __init__(self, message: str, cause: Optional[BaseException] = None):
46 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ConfigNamesError", cause, True)
47
48
49 class ConfigNameItem(TypedDict):
50 dataset: str
51 config: str
52
53
54 class ConfigNamesResponse(TypedDict):
55 config_names: List[ConfigNameItem]
56
57
58 def compute_config_names_response(
59 dataset: str,
60 hf_token: Optional[str] = None,
61 ) -> ConfigNamesResponse:
62 """
63 Get the response of /config-names for one specific dataset on huggingface.co.
64 Dataset can be private or gated if you pass an acceptable token.
65
66 It is assumed that the dataset exists and can be accessed using the token.
67
68 Args:
69 dataset (`str`):
70 A namespace (user or an organization) and a repo name separated
71 by a `/`.
72 hf_token (`str`, *optional*):
73 An authentication token (See https://huggingface.co/settings/token)
74 Returns:
75 `ConfigNamesResponse`: An object with the list of config names.
76 <Tip>
77 Raises the following errors:
78 - [`~job_runners.config_names.EmptyDatasetError`]
79 The dataset is empty.
80 - [`~job_runners.config_names.ConfigNamesError`]
81 If the list of configs could not be obtained using the datasets library.
82 </Tip>
83 """
84 logging.info(f"get config names for dataset={dataset}")
85 use_auth_token: Union[bool, str, None] = hf_token if hf_token is not None else False
86 # get the list of splits in streaming mode
87 try:
88 config_name_items: List[ConfigNameItem] = [
89 {"dataset": dataset, "config": str(config)}
90 for config in sorted(get_dataset_config_names(path=dataset, use_auth_token=use_auth_token))
91 ]
92 except _EmptyDatasetError as err:
93 raise EmptyDatasetError("The dataset is empty.", cause=err) from err
94 except Exception as err:
95 raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
96 return ConfigNamesResponse(config_names=config_name_items)
97
98
99 class ConfigNamesJobRunner(DatasetsBasedJobRunner):
100 @staticmethod
101 def get_job_type() -> str:
102 return "/config-names"
103
104 @staticmethod
105 def get_job_runner_version() -> int:
106 return PROCESSING_STEP_CONFIG_NAMES_VERSION
107
108 def compute(self) -> CompleteJobResult:
109 if self.dataset is None:
110 raise ParameterMissingError("'dataset' parameter is required")
111 return CompleteJobResult(
112 compute_config_names_response(dataset=self.dataset, hf_token=self.common_config.hf_token)
113 )
114
115 def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
116 """Get the set of new splits, from the content created by the compute."""
117 return {SplitFullName(dataset=s["dataset"], config=s["config"], split=None) for s in content["config_names"]}
118
[end of services/worker/src/worker/job_runners/config_names.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/config_names.py
--- a/services/worker/src/worker/job_runners/config_names.py
+++ b/services/worker/src/worker/job_runners/config_names.py
@@ -13,7 +13,7 @@
from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
-ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "ConfigNamesError"]
+ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "DatasetModuleNotInstalledError", "ConfigNamesError"]
class ConfigNamesJobRunnerError(JobRunnerError):
@@ -39,6 +39,13 @@
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
+class DatasetModuleNotInstalledError(ConfigNamesJobRunnerError):
+ """Raised when the dataset tries to import a module that is not installed."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetModuleNotInstalledError", cause, True)
+
+
class ConfigNamesError(ConfigNamesJobRunnerError):
"""Raised when the config names could not be fetched."""
@@ -77,6 +84,8 @@
Raises the following errors:
- [`~job_runners.config_names.EmptyDatasetError`]
The dataset is empty.
+ - [`~job_runners.config_names.DatasetModuleNotInstalledError`]
+ The dataset tries to import a module that is not installed.
- [`~job_runners.config_names.ConfigNamesError`]
If the list of configs could not be obtained using the datasets library.
</Tip>
@@ -91,6 +100,10 @@
]
except _EmptyDatasetError as err:
raise EmptyDatasetError("The dataset is empty.", cause=err) from err
+ except ImportError as err:
+ raise DatasetModuleNotInstalledError(
+ "The dataset tries to import a module that is not installed.", cause=err
+ ) from err
except Exception as err:
raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
return ConfigNamesResponse(config_names=config_name_items)
| {"golden_diff": "diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/config_names.py\n--- a/services/worker/src/worker/job_runners/config_names.py\n+++ b/services/worker/src/worker/job_runners/config_names.py\n@@ -13,7 +13,7 @@\n from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError\n from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner\n \n-ConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"ConfigNamesError\"]\n+ConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"DatasetModuleNotInstalledError\", \"ConfigNamesError\"]\n \n \n class ConfigNamesJobRunnerError(JobRunnerError):\n@@ -39,6 +39,13 @@\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"EmptyDatasetError\", cause, True)\n \n \n+class DatasetModuleNotInstalledError(ConfigNamesJobRunnerError):\n+ \"\"\"Raised when the dataset tries to import a module that is not installed.\"\"\"\n+\n+ def __init__(self, message: str, cause: Optional[BaseException] = None):\n+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"DatasetModuleNotInstalledError\", cause, True)\n+\n+\n class ConfigNamesError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the config names could not be fetched.\"\"\"\n \n@@ -77,6 +84,8 @@\n Raises the following errors:\n - [`~job_runners.config_names.EmptyDatasetError`]\n The dataset is empty.\n+ - [`~job_runners.config_names.DatasetModuleNotInstalledError`]\n+ The dataset tries to import a module that is not installed.\n - [`~job_runners.config_names.ConfigNamesError`]\n If the list of configs could not be obtained using the datasets library.\n </Tip>\n@@ -91,6 +100,10 @@\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n+ except ImportError as err:\n+ raise DatasetModuleNotInstalledError(\n+ \"The dataset tries to import a module that is not installed.\", cause=err\n+ ) from err\n except Exception as err:\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n return ConfigNamesResponse(config_names=config_name_items)\n", "issue": "No specific error when dataset tries to import a non-installed module\nWhen a dataset script tries to import a module/library that is not installed, there is no informative error message.\r\n\r\nSee:\r\n- #1067 \r\n- #1068\r\n\r\nRelated to:\r\n- #976\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom http import HTTPStatus\nfrom typing import Any, List, Literal, Mapping, Optional, TypedDict, Union\n\nfrom datasets import get_dataset_config_names\nfrom datasets.data_files import EmptyDatasetError as _EmptyDatasetError\nfrom libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION\nfrom libcommon.simple_cache import SplitFullName\n\nfrom worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError\nfrom worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner\n\nConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"ConfigNamesError\"]\n\n\nclass ConfigNamesJobRunnerError(JobRunnerError):\n \"\"\"Base class for job runner exceptions.\"\"\"\n\n def __init__(\n self,\n message: str,\n status_code: HTTPStatus,\n code: ConfigNamesJobRunnerErrorCode,\n cause: Optional[BaseException] = None,\n disclose_cause: bool = False,\n ):\n super().__init__(\n message=message, 
status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause\n )\n\n\nclass EmptyDatasetError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the dataset has no data.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"EmptyDatasetError\", cause, True)\n\n\nclass ConfigNamesError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the config names could not be fetched.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"ConfigNamesError\", cause, True)\n\n\nclass ConfigNameItem(TypedDict):\n dataset: str\n config: str\n\n\nclass ConfigNamesResponse(TypedDict):\n config_names: List[ConfigNameItem]\n\n\ndef compute_config_names_response(\n dataset: str,\n hf_token: Optional[str] = None,\n) -> ConfigNamesResponse:\n \"\"\"\n Get the response of /config-names for one specific dataset on huggingface.co.\n Dataset can be private or gated if you pass an acceptable token.\n\n It is assumed that the dataset exists and can be accessed using the token.\n\n Args:\n dataset (`str`):\n A namespace (user or an organization) and a repo name separated\n by a `/`.\n hf_token (`str`, *optional*):\n An authentication token (See https://huggingface.co/settings/token)\n Returns:\n `ConfigNamesResponse`: An object with the list of config names.\n <Tip>\n Raises the following errors:\n - [`~job_runners.config_names.EmptyDatasetError`]\n The dataset is empty.\n - [`~job_runners.config_names.ConfigNamesError`]\n If the list of configs could not be obtained using the datasets library.\n </Tip>\n \"\"\"\n logging.info(f\"get config names for dataset={dataset}\")\n use_auth_token: Union[bool, str, None] = hf_token if hf_token is not None else False\n # get the list of splits in streaming mode\n try:\n config_name_items: List[ConfigNameItem] = [\n {\"dataset\": dataset, \"config\": str(config)}\n for config in sorted(get_dataset_config_names(path=dataset, use_auth_token=use_auth_token))\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n except Exception as err:\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n return ConfigNamesResponse(config_names=config_name_items)\n\n\nclass ConfigNamesJobRunner(DatasetsBasedJobRunner):\n @staticmethod\n def get_job_type() -> str:\n return \"/config-names\"\n\n @staticmethod\n def get_job_runner_version() -> int:\n return PROCESSING_STEP_CONFIG_NAMES_VERSION\n\n def compute(self) -> CompleteJobResult:\n if self.dataset is None:\n raise ParameterMissingError(\"'dataset' parameter is required\")\n return CompleteJobResult(\n compute_config_names_response(dataset=self.dataset, hf_token=self.common_config.hf_token)\n )\n\n def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:\n \"\"\"Get the set of new splits, from the content created by the compute.\"\"\"\n return {SplitFullName(dataset=s[\"dataset\"], config=s[\"config\"], split=None) for s in content[\"config_names\"]}\n", "path": "services/worker/src/worker/job_runners/config_names.py"}]} | 1,846 | 525 |
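The dataset-viewer fix above maps `ImportError` to a dedicated error type. A minimal sketch of that mapping, assuming the `datasets` library is installed; the exception class here is a simplified stand-in, not the project's real one.

```python
from datasets import get_dataset_config_names


class DatasetModuleNotInstalledError(Exception):
    """Raised when the dataset script imports a module that is not installed."""


def list_configs(dataset: str) -> list:
    try:
        return sorted(str(config) for config in get_dataset_config_names(path=dataset))
    except ImportError as err:
        raise DatasetModuleNotInstalledError(
            "The dataset tries to import a module that is not installed."
        ) from err
```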
gh_patches_debug_19065 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-2850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is it possible to query Log Analytics via the az cli with a saved query?
I can’t tell from the documentation: is it possible to run a saved Log Analytics query from this CLI command?

If not, a useful enhancement would be to enable the use of a saved query in addition to the ability to execute queries in-line. The queries get long and cumbersome to maintain outside of Log Analytics.
If it is, however, possible to run a saved query, would you mind updating the documentation here? Thanks.
---
#### Document Details
⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.*
* ID: f0fd6a58-ac1a-fa45-8d96-579b4af36499
* Version Independent ID: 4098ca97-1b85-eb29-18e9-e6f0495fd030
* Content: [az monitor log-analytics](https://docs.microsoft.com/en-us/cli/azure/ext/log-analytics/monitor/log-analytics?view=azure-cli-latest)
* Content Source: [latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml)
* GitHub Login: @rloutlaw
* Microsoft Alias: **routlaw**
</issue>
<code>
[start of src/log-analytics/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.2.1"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 with open('README.rst', 'r', encoding='utf-8') as f:
30 README = f.read()
31 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
32 HISTORY = f.read()
33
34 setup(
35 name='log-analytics',
36 version=VERSION,
37 description='Support for Azure Log Analytics query capabilities.',
38 long_description=README + '\n\n' + HISTORY,
39 license='MIT',
40 author='Ace Eldeib',
41 author_email='[email protected]',
42 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/log-analytics',
43 classifiers=CLASSIFIERS,
44 packages=find_packages(exclude=["tests"]),
45 package_data={'azext_loganalytics': ['azext_metadata.json']},
46 install_requires=DEPENDENCIES
47 )
48
[end of src/log-analytics/setup.py]
[start of src/log-analytics/azext_loganalytics/_help.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from knack.help_files import helps
7
8 # pylint: disable=line-too-long
9
10 helps['monitor log-analytics'] = """
11 type: group
12 short-summary: Commands for querying data in Log Analytics workspaces.
13 """
14
15 helps['monitor log-analytics query'] = """
16 type: command
17 short-summary: Query a Log Analytics workspace.
18 examples:
19 - name: Execute a simple query over past 3.5 days.
20 text: |
21 az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
22 """
23
[end of src/log-analytics/azext_loganalytics/_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/log-analytics/azext_loganalytics/_help.py b/src/log-analytics/azext_loganalytics/_help.py
--- a/src/log-analytics/azext_loganalytics/_help.py
+++ b/src/log-analytics/azext_loganalytics/_help.py
@@ -18,5 +18,9 @@
examples:
- name: Execute a simple query over past 3.5 days.
text: |
- az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
+ az monitor log-analytics query -w workspace-customId --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
+ - name: Execute a saved query in workspace
+ text: |
+ QUERY=$(az monitor log-analytics workspace saved-search show -g resource-group --workspace-name workspace-name -n query-name --query query --output tsv)
+ az monitor log-analytics query -w workspace-customId --analytics-query "$QUERY"
"""
diff --git a/src/log-analytics/setup.py b/src/log-analytics/setup.py
--- a/src/log-analytics/setup.py
+++ b/src/log-analytics/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.2.1"
+VERSION = "0.2.2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/src/log-analytics/azext_loganalytics/_help.py b/src/log-analytics/azext_loganalytics/_help.py\n--- a/src/log-analytics/azext_loganalytics/_help.py\n+++ b/src/log-analytics/azext_loganalytics/_help.py\n@@ -18,5 +18,9 @@\n examples:\n - name: Execute a simple query over past 3.5 days.\n text: |\n- az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n+ az monitor log-analytics query -w workspace-customId --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n+ - name: Execute a saved query in workspace\n+ text: |\n+ QUERY=$(az monitor log-analytics workspace saved-search show -g resource-group --workspace-name workspace-name -n query-name --query query --output tsv)\n+ az monitor log-analytics query -w workspace-customId --analytics-query \"$QUERY\"\n \"\"\"\ndiff --git a/src/log-analytics/setup.py b/src/log-analytics/setup.py\n--- a/src/log-analytics/setup.py\n+++ b/src/log-analytics/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.2.1\"\n+VERSION = \"0.2.2\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Is it possible to query Log Analytics via the az cli with a saved query?\n\r\nI can\u2019t tell from the documentation, is it possible to run a saved Log Analytics Query from this CLI command? \r\n\r\nIf not, a useful enhancement would be to enable the use a saved query in addition to the ability to execute queries in-line. The queries get long and cumbersome to maintain outside of Log Analytics.\r\n\r\nIf it is, however, possible to run a saved query, would you mind updating the documentation here? Thanks.\r\n\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: f0fd6a58-ac1a-fa45-8d96-579b4af36499\r\n* Version Independent ID: 4098ca97-1b85-eb29-18e9-e6f0495fd030\r\n* Content: [az monitor log-analytics](https://docs.microsoft.com/en-us/cli/azure/ext/log-analytics/monitor/log-analytics?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.2.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='log-analytics',\n version=VERSION,\n description='Support for Azure Log Analytics query capabilities.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/log-analytics',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_loganalytics': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/log-analytics/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n# pylint: disable=line-too-long\n\nhelps['monitor log-analytics'] = \"\"\"\n type: group\n short-summary: Commands for querying data in Log Analytics workspaces.\n\"\"\"\n\nhelps['monitor log-analytics query'] = \"\"\"\n type: command\n short-summary: Query a Log Analytics workspace.\n examples:\n - name: Execute a simple query over past 3.5 days.\n text: |\n az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n\"\"\"\n", "path": "src/log-analytics/azext_loganalytics/_help.py"}]} | 1,568 | 362 |
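The documentation change above shows the two-step pattern: read the saved query's text, then pass it to `az monitor log-analytics query`. A hedged sketch driving those same documented commands from Python via `subprocess`; the resource group, workspace and query names are placeholders.

```python
import subprocess


def run_saved_query(resource_group: str, workspace_name: str,
                    workspace_id: str, query_name: str) -> str:
    # Step 1: fetch the saved query's KQL text.
    saved_query = subprocess.check_output(
        ["az", "monitor", "log-analytics", "workspace", "saved-search", "show",
         "-g", resource_group, "--workspace-name", workspace_name,
         "-n", query_name, "--query", "query", "--output", "tsv"],
        text=True,
    ).strip()
    # Step 2: execute it against the workspace.
    return subprocess.check_output(
        ["az", "monitor", "log-analytics", "query",
         "-w", workspace_id, "--analytics-query", saved_query],
        text=True,
    )
```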
gh_patches_debug_60894 | rasdani/github-patches | git_diff | tiangolo__fastapi-493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI exceptions module mistakenly references the 'requests' package
**Describe the bug**
Starting up a FastAPI 0.38.0 app displays the following error:
```python
from fastapi import FastAPI
File ".../lib/site-packages/fastapi/__init__.py", line 7, in <module>
from .applications import FastAPI
File ".../lib/site-packages/fastapi/applications.py", line 3, in <module>
from fastapi import routing
File ".../lib/site-packages/fastapi/routing.py", line 7, in <module>
from fastapi.dependencies.models import Dependant
File ".../lib/site-packages/fastapi/dependencies/models.py", line 3, in <module>
from fastapi.security.base import SecurityBase
File ".../lib/site-packages/fastapi/security/__init__.py", line 2, in <module>
from .http import (
File ".../lib/site-packages/fastapi/security/http.py", line 5, in <module>
from fastapi.exceptions import HTTPException
File ".../lib/site-packages/fastapi/exceptions.py", line 5, in <module>
from requests import Request
ModuleNotFoundError: No module named 'requests'
```
**Expected behavior**
The app should start without import errors.
**Environment:**
- OS: Linux, Windows, and macOS
- FastAPI Version: 0.38.0
**Additional context**
It's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`
FastAPI exceptions module mistakenly references the 'requests' package
**Describe the bug**
Starting up a FastAPI 0.38.0 app displays the following error:
```python
from fastapi import FastAPI
File ".../lib/site-packages/fastapi/__init__.py", line 7, in <module>
from .applications import FastAPI
File ".../lib/site-packages/fastapi/applications.py", line 3, in <module>
from fastapi import routing
File ".../lib/site-packages/fastapi/routing.py", line 7, in <module>
from fastapi.dependencies.models import Dependant
File ".../lib/site-packages/fastapi/dependencies/models.py", line 3, in <module>
from fastapi.security.base import SecurityBase
File ".../lib/site-packages/fastapi/security/__init__.py", line 2, in <module>
from .http import (
File ".../lib/site-packages/fastapi/security/http.py", line 5, in <module>
from fastapi.exceptions import HTTPException
File ".../lib/site-packages/fastapi/exceptions.py", line 5, in <module>
from requests import Request
ModuleNotFoundError: No module named 'requests'
```
**Expected behavior**
The app should start without import errors.
**Environment:**
- OS: Linux, Windows, and macOS
- FastAPI Version: 0.38.0
**Additional context**
It's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`
</issue>
<code>
[start of fastapi/exceptions.py]
1 from typing import Any, Sequence
2
3 from pydantic import ValidationError
4 from pydantic.error_wrappers import ErrorList
5 from requests import Request
6 from starlette.exceptions import HTTPException as StarletteHTTPException
7 from starlette.websockets import WebSocket
8
9
10 class HTTPException(StarletteHTTPException):
11 def __init__(
12 self, status_code: int, detail: Any = None, headers: dict = None
13 ) -> None:
14 super().__init__(status_code=status_code, detail=detail)
15 self.headers = headers
16
17
18 class RequestValidationError(ValidationError):
19 def __init__(self, errors: Sequence[ErrorList]) -> None:
20 super().__init__(errors, Request)
21
22
23 class WebSocketRequestValidationError(ValidationError):
24 def __init__(self, errors: Sequence[ErrorList]) -> None:
25 super().__init__(errors, WebSocket)
26
[end of fastapi/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fastapi/exceptions.py b/fastapi/exceptions.py
--- a/fastapi/exceptions.py
+++ b/fastapi/exceptions.py
@@ -2,8 +2,8 @@
from pydantic import ValidationError
from pydantic.error_wrappers import ErrorList
-from requests import Request
from starlette.exceptions import HTTPException as StarletteHTTPException
+from starlette.requests import Request
from starlette.websockets import WebSocket
| {"golden_diff": "diff --git a/fastapi/exceptions.py b/fastapi/exceptions.py\n--- a/fastapi/exceptions.py\n+++ b/fastapi/exceptions.py\n@@ -2,8 +2,8 @@\n \n from pydantic import ValidationError\n from pydantic.error_wrappers import ErrorList\n-from requests import Request\n from starlette.exceptions import HTTPException as StarletteHTTPException\n+from starlette.requests import Request\n from starlette.websockets import WebSocket\n", "issue": "FastAPI exceptions module mistakenly references the 'requests' package\n**Describe the bug**\r\nStarting up a FastAPI 0.38.0 app displays the following error:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\n File \".../lib/site-packages/fastapi/__init__.py\", line 7, in <module>\r\n from .applications import FastAPI\r\n File \".../lib/site-packages/fastapi/applications.py\", line 3, in <module>\r\n from fastapi import routing\r\n File \".../lib/site-packages/fastapi/routing.py\", line 7, in <module>\r\n from fastapi.dependencies.models import Dependant\r\n File \".../lib/site-packages/fastapi/dependencies/models.py\", line 3, in <module>\r\n from fastapi.security.base import SecurityBase\r\n File \".../lib/site-packages/fastapi/security/__init__.py\", line 2, in <module>\r\n from .http import (\r\n File \".../lib/site-packages/fastapi/security/http.py\", line 5, in <module>\r\n from fastapi.exceptions import HTTPException\r\n File \".../lib/site-packages/fastapi/exceptions.py\", line 5, in <module>\r\n from requests import Request\r\nModuleNotFoundError: No module named 'requests'\r\n```\r\n\r\n**Expected behavior**\r\nThe app should start without import errors.\r\n\r\n**Environment:**\r\n - OS: Linux, Windows, and macOS\r\n - FastAPI Version: 0.38.0\r\n\r\n**Additional context**\r\nIt's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`\nFastAPI exceptions module mistakenly references the 'requests' package\n**Describe the bug**\r\nStarting up a FastAPI 0.38.0 app displays the following error:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\n File \".../lib/site-packages/fastapi/__init__.py\", line 7, in <module>\r\n from .applications import FastAPI\r\n File \".../lib/site-packages/fastapi/applications.py\", line 3, in <module>\r\n from fastapi import routing\r\n File \".../lib/site-packages/fastapi/routing.py\", line 7, in <module>\r\n from fastapi.dependencies.models import Dependant\r\n File \".../lib/site-packages/fastapi/dependencies/models.py\", line 3, in <module>\r\n from fastapi.security.base import SecurityBase\r\n File \".../lib/site-packages/fastapi/security/__init__.py\", line 2, in <module>\r\n from .http import (\r\n File \".../lib/site-packages/fastapi/security/http.py\", line 5, in <module>\r\n from fastapi.exceptions import HTTPException\r\n File \".../lib/site-packages/fastapi/exceptions.py\", line 5, in <module>\r\n from requests import Request\r\nModuleNotFoundError: No module named 'requests'\r\n```\r\n\r\n**Expected behavior**\r\nThe app should start without import errors.\r\n\r\n**Environment:**\r\n - OS: Linux, Windows, and macOS\r\n - FastAPI Version: 0.38.0\r\n\r\n**Additional context**\r\nIt's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`\n", "before_files": [{"content": "from typing import Any, Sequence\n\nfrom pydantic import ValidationError\nfrom pydantic.error_wrappers import ErrorList\nfrom requests import Request\nfrom 
starlette.exceptions import HTTPException as StarletteHTTPException\nfrom starlette.websockets import WebSocket\n\n\nclass HTTPException(StarletteHTTPException):\n def __init__(\n self, status_code: int, detail: Any = None, headers: dict = None\n ) -> None:\n super().__init__(status_code=status_code, detail=detail)\n self.headers = headers\n\n\nclass RequestValidationError(ValidationError):\n def __init__(self, errors: Sequence[ErrorList]) -> None:\n super().__init__(errors, Request)\n\n\nclass WebSocketRequestValidationError(ValidationError):\n def __init__(self, errors: Sequence[ErrorList]) -> None:\n super().__init__(errors, WebSocket)\n", "path": "fastapi/exceptions.py"}]} | 1,441 | 96 |
gh_patches_debug_965 | rasdani/github-patches | git_diff | tiangolo__fastapi-9468 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI tests in pydantic failing due to flask deprecation
### Privileged issue
- [X] I'm @tiangolo or he asked me directly to create an issue here.
### Issue Content
hope you don't mind me creating an issue, pydantic's 1.10.X tests are failing due to a new issue with running our fastapi tests, see
https://github.com/pydantic/pydantic/actions/runs/4832692304/jobs/8611783607?pr=5628
output from pydantic's tests:
```
==================================== ERRORS ====================================
______ ERROR collecting tests/test_tutorial/test_wsgi/test_tutorial001.py ______
tests/test_tutorial/test_wsgi/test_tutorial001.py:3: in <module>
from docs_src.wsgi.tutorial001 import app
docs_src/wsgi/tutorial001.py:3: in <module>
from flask import Flask, escape, request
<frozen importlib._bootstrap>:1075: in _handle_fromlist
???
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/flask/__init__.py:71: in __getattr__
warnings.warn(
E DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' instead.
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_wsgi/test_tutorial001.py - DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape'
```
related to https://github.com/pydantic/pydantic/pull/5628
</issue>
<code>
[start of docs_src/wsgi/tutorial001.py]
1 from fastapi import FastAPI
2 from fastapi.middleware.wsgi import WSGIMiddleware
3 from flask import Flask, escape, request
4
5 flask_app = Flask(__name__)
6
7
8 @flask_app.route("/")
9 def flask_main():
10 name = request.args.get("name", "World")
11 return f"Hello, {escape(name)} from Flask!"
12
13
14 app = FastAPI()
15
16
17 @app.get("/v2")
18 def read_main():
19 return {"message": "Hello World"}
20
21
22 app.mount("/v1", WSGIMiddleware(flask_app))
23
[end of docs_src/wsgi/tutorial001.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs_src/wsgi/tutorial001.py b/docs_src/wsgi/tutorial001.py
--- a/docs_src/wsgi/tutorial001.py
+++ b/docs_src/wsgi/tutorial001.py
@@ -1,6 +1,7 @@
from fastapi import FastAPI
from fastapi.middleware.wsgi import WSGIMiddleware
-from flask import Flask, escape, request
+from flask import Flask, request
+from markupsafe import escape
flask_app = Flask(__name__)
| {"golden_diff": "diff --git a/docs_src/wsgi/tutorial001.py b/docs_src/wsgi/tutorial001.py\n--- a/docs_src/wsgi/tutorial001.py\n+++ b/docs_src/wsgi/tutorial001.py\n@@ -1,6 +1,7 @@\n from fastapi import FastAPI\n from fastapi.middleware.wsgi import WSGIMiddleware\n-from flask import Flask, escape, request\n+from flask import Flask, request\n+from markupsafe import escape\n \n flask_app = Flask(__name__)\n", "issue": "FastAPI tests in pydantic failing due to flask deprecation\n### Privileged issue\n\n- [X] I'm @tiangolo or he asked me directly to create an issue here.\n\n### Issue Content\n\nhope you don't mind me creating an issue, pydantic's 1.10.X tests are failing due to a new issue with running our fastapi tests, see\r\n\r\nhttps://github.com/pydantic/pydantic/actions/runs/4832692304/jobs/8611783607?pr=5628\r\n\r\noutput from pydantic's tests:\r\n\r\n```\r\n==================================== ERRORS ====================================\r\n______ ERROR collecting tests/test_tutorial/test_wsgi/test_tutorial001.py ______\r\ntests/test_tutorial/test_wsgi/test_tutorial001.py:3: in <module>\r\n from docs_src.wsgi.tutorial001 import app\r\ndocs_src/wsgi/tutorial001.py:3: in <module>\r\n from flask import Flask, escape, request\r\n<frozen importlib._bootstrap>:1075: in _handle_fromlist\r\n ???\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/flask/__init__.py:71: in __getattr__\r\n warnings.warn(\r\nE DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' instead.\r\n=========================== short test summary info ============================\r\nERROR tests/test_tutorial/test_wsgi/test_tutorial001.py - DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' \r\n```\r\n\r\nrelated to https://github.com/pydantic/pydantic/pull/5628\n", "before_files": [{"content": "from fastapi import FastAPI\nfrom fastapi.middleware.wsgi import WSGIMiddleware\nfrom flask import Flask, escape, request\n\nflask_app = Flask(__name__)\n\n\n@flask_app.route(\"/\")\ndef flask_main():\n name = request.args.get(\"name\", \"World\")\n return f\"Hello, {escape(name)} from Flask!\"\n\n\napp = FastAPI()\n\n\[email protected](\"/v2\")\ndef read_main():\n return {\"message\": \"Hello World\"}\n\n\napp.mount(\"/v1\", WSGIMiddleware(flask_app))\n", "path": "docs_src/wsgi/tutorial001.py"}]} | 1,100 | 110 |
gh_patches_debug_4721 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-3240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: src/system/os.py does not correctly return architecture for bsd platform
### Describe the bug
Running `uname -m` returns the following on FreeBSD:
```
amd64
```
The code here does not support `amd64` as input:
https://github.com/opensearch-project/opensearch-build/blob/main/src/system/os.py#L12-L19
```
def current_architecture() -> str:
architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
if architecture == "x86_64":
return "x64"
elif architecture == "aarch64" or architecture == "arm64":
return "arm64"
else:
raise ValueError(f"Unsupported architecture: {architecture}")
```
Thanks.
### To reproduce
Run the build process on a FreeBSD server and see the output:
```
$ ./build.sh manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch
Installing dependencies in . ...
Installing dependencies from Pipfile.lock (b36c9c)...
To activate this project's virtualenv, run pipenv shell.
Alternatively, run a command inside the virtualenv with pipenv run.
Running ./src/run_build.py manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch ...
2023-02-23 23:15:47 INFO Building in /tmp/tmpllimwxjs
2023-02-23 23:15:47 INFO Removing /tmp/tmpllimwxjs
Traceback (most recent call last):
File "./src/run_build.py", line 81, in <module>
sys.exit(main())
File "./src/run_build.py", line 55, in main
architecture=args.architecture or manifest.build.architecture,
File "/usr/share/opensearch/opensearch-build/src/build_workflow/build_target.py", line 45, in __init__
self.architecture = architecture or current_architecture()
File "/usr/share/opensearch/opensearch-build/src/system/os.py", line 20, in current_architecture
raise ValueError(f"Unsupported architecture: {architecture}")
ValueError: Unsupported architecture: amd64
```
### Expected behavior
BSD x64 hosts can run the build without specifying `--architecture x64`.
### Screenshots
If applicable, add screenshots to help explain your problem.
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
</issue>
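For context, here is a minimal sketch of the normalization the report is asking for: treating FreeBSD's `amd64` the same as Linux's `x86_64`. The alias table and its name are illustrative, not the project's actual fix:

```python
import subprocess

# Illustrative alias table: FreeBSD reports "amd64" where Linux reports "x86_64".
_ARCH_ALIASES = {
    "x86_64": "x64",
    "amd64": "x64",
    "aarch64": "arm64",
    "arm64": "arm64",
}


def current_architecture() -> str:
    machine = subprocess.check_output(["uname", "-m"]).decode().strip()
    try:
        return _ARCH_ALIASES[machine]
    except KeyError:
        raise ValueError(f"Unsupported architecture: {machine}")
```

An equivalent one-line change is to extend the existing `x86_64` branch with an `or architecture == "amd64"` check, which is what the accepted patch further down this record does.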
<code>
[start of src/system/os.py]
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8 import os
9 import subprocess
10
11
12 def current_architecture() -> str:
13 architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
14 if architecture == "x86_64":
15 return "x64"
16 elif architecture == "aarch64" or architecture == "arm64":
17 return "arm64"
18 else:
19 raise ValueError(f"Unsupported architecture: {architecture}")
20
21
22 def current_platform() -> str:
23 if os.name == "nt":
24 return "windows"
25 else:
26 return subprocess.check_output(["uname", "-s"]).decode().strip().lower()
27
28
29 def deb_architecture(architecture: str) -> str:
30 # This would convert arch from "current_architecture" to deb specific architecture alternatives
31
32 deb_architecture_map = {
33 "x64": "amd64",
34 "arm64": "arm64",
35 }
36
37 return deb_architecture_map[architecture]
38
39
40 def rpm_architecture(architecture: str) -> str:
41 # This would convert arch from "current_architecture" to rpm specific architecture alternatives
42
43 rpm_architecture_map = {
44 "x64": "x86_64",
45 "arm64": "aarch64",
46 }
47
48 return rpm_architecture_map[architecture]
49
[end of src/system/os.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/system/os.py b/src/system/os.py
--- a/src/system/os.py
+++ b/src/system/os.py
@@ -11,7 +11,7 @@
def current_architecture() -> str:
architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
- if architecture == "x86_64":
+ if architecture == "x86_64" or architecture == "amd64":
return "x64"
elif architecture == "aarch64" or architecture == "arm64":
return "arm64"
| {"golden_diff": "diff --git a/src/system/os.py b/src/system/os.py\n--- a/src/system/os.py\n+++ b/src/system/os.py\n@@ -11,7 +11,7 @@\n \n def current_architecture() -> str:\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\n- if architecture == \"x86_64\":\n+ if architecture == \"x86_64\" or architecture == \"amd64\":\n return \"x64\"\n elif architecture == \"aarch64\" or architecture == \"arm64\":\n return \"arm64\"\n", "issue": "[Bug]: src/system/os.py does not correctly return architecture for bsd platform\n### Describe the bug\r\n\r\n\r\nRun `uname -m` will return follow in the freebsd:\r\n```\r\namd64\r\n```\r\n\r\nThe code here does not support `amd64` as input:\r\nhttps://github.com/opensearch-project/opensearch-build/blob/main/src/system/os.py#L12-L19\r\n```\r\ndef current_architecture() -> str:\r\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\r\n if architecture == \"x86_64\":\r\n return \"x64\"\r\n elif architecture == \"aarch64\" or architecture == \"arm64\":\r\n return \"arm64\"\r\n else:\r\n raise ValueError(f\"Unsupported architecture: {architecture}\")\r\n```\r\n\r\n\r\n\r\nThanks.\r\n\r\n\r\n### To reproduce\r\n\r\nRun the build process on a freebsd server and see output:\r\n```\r\n$ ./build.sh manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch\r\nInstalling dependencies in . ...\r\nInstalling dependencies from Pipfile.lock (b36c9c)...\r\nTo activate this project's virtualenv, run pipenv shell.\r\nAlternatively, run a command inside the virtualenv with pipenv run.\r\nRunning ./src/run_build.py manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch ...\r\n2023-02-23 23:15:47 INFO Building in /tmp/tmpllimwxjs\r\n2023-02-23 23:15:47 INFO Removing /tmp/tmpllimwxjs\r\nTraceback (most recent call last):\r\n File \"./src/run_build.py\", line 81, in <module>\r\n sys.exit(main())\r\n File \"./src/run_build.py\", line 55, in main\r\n architecture=args.architecture or manifest.build.architecture,\r\n File \"/usr/share/opensearch/opensearch-build/src/build_workflow/build_target.py\", line 45, in __init__\r\n self.architecture = architecture or current_architecture()\r\n File \"/usr/share/opensearch/opensearch-build/src/system/os.py\", line 20, in current_architecture\r\n raise ValueError(f\"Unsupported architecture: {architecture}\")\r\nValueError: Unsupported architecture: amd64\r\n\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe bsd x64 hosts can run the code without specifying --architecture x64.\r\n\r\n### Screenshots\r\n\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Host / Environment\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\r\n\r\n### Relevant log output\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nimport subprocess\n\n\ndef current_architecture() -> str:\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\n if architecture == \"x86_64\":\n return \"x64\"\n elif architecture == \"aarch64\" or architecture == \"arm64\":\n return \"arm64\"\n else:\n raise ValueError(f\"Unsupported architecture: {architecture}\")\n\n\ndef current_platform() -> str:\n if os.name == \"nt\":\n return \"windows\"\n else:\n return subprocess.check_output([\"uname\", 
\"-s\"]).decode().strip().lower()\n\n\ndef deb_architecture(architecture: str) -> str:\n # This would convert arch from \"current_architecture\" to deb specific architecture alternatives\n\n deb_architecture_map = {\n \"x64\": \"amd64\",\n \"arm64\": \"arm64\",\n }\n\n return deb_architecture_map[architecture]\n\n\ndef rpm_architecture(architecture: str) -> str:\n # This would convert arch from \"current_architecture\" to rpm specific architecture alternatives\n\n rpm_architecture_map = {\n \"x64\": \"x86_64\",\n \"arm64\": \"aarch64\",\n }\n\n return rpm_architecture_map[architecture]\n", "path": "src/system/os.py"}]} | 1,519 | 132 |
gh_patches_debug_558 | rasdani/github-patches | git_diff | pex-tool__pex-691 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 1.6.4
On the docket:
+ [x] Restore pex.pex_bootstrapper.is_compressed API #684
+ [ ] Release more flexible pex binaries. #654
+ [x] If an `--interpreter-constraint` is set, it should always be honored. #656
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.3'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.3'
+__version__ = '1.6.4'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.3'\n+__version__ = '1.6.4'\n", "issue": "Release 1.6.4\nOn the docket:\r\n+ [x] Restore pex.pex_bootstrapper.is_compressed API #684\r\n+ [ ] Release more flexible pex binaries. #654\r\n + [x] If an `--interpreter-constraint` is set, it should always be honored. #656\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.3'\n", "path": "pex/version.py"}]} | 658 | 95 |
gh_patches_debug_25746 | rasdani/github-patches | git_diff | mito-ds__mito-359 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
spelling mistake during mitoinstaller install
**Describe the bug**
Small issue, very minor, found a spelling mistake when running mitoinstaller install,
Starting install...
Create mito user
Upgrade mitoinstaller
Setting up **enviornment** <--- **environment**
Check dependencies
Remove mitosheet3 if present
Install mitosheet
This might take a few moments...
**To Reproduce**
Steps to reproduce the behavior:
1. run python -m mitoinstaller install
Please include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.
**Expected behavior**
should be corrected to "environment"
**Screenshots**

**Desktop (please complete the following information):**
N/A
**Additional context**
N/A
</issue>
<code>
[start of mitoinstaller/mitoinstaller/__main__.py]
1 """
2 The Mito Installer package contains utils for installing
3 Mito within your Python enviornment.
4
5 Long term, we aim to meet:
6 1. This package has minimal dependencies, both for speed of download and the ultimate portability.
7 2. The installation attempts to fail as early as possible, and to give the user as much help
8 help as possible while doing so.
9 """
10 from colorama import init
11 from termcolor import colored # type: ignore
12
13 from mitoinstaller.install import do_install
14
15
16 def main() -> None:
17 """
18 The main function of the Mito installer, this function is responsible
19 for installing and upgrading the `mitosheet` package.
20
21 To install Mito:
22 python -m mitoinstaller install
23
24 To upgrade Mito:
25 python -m mitoinstaller upgrade
26
27 To install Mito from TestPyPi
28 python -m mitoinstaller install --test-pypi
29 """
30 import sys
31 init()
32
33 if len(sys.argv) > 1:
34 command = sys.argv[1]
35 else:
36 command = ''
37
38 if command == 'install' or command == 'upgrade':
39 do_install()
40 elif command == 'uninstall':
41 print('To uninstall, run,', colored('`pip uninstall mitosheet`', 'green'))
42 else:
43 # NOTE: we don't add upgrade_to_jupyterlab_3 to the help.
44 # We only send this command to the users who need to know this (namely, those that need to upgrade)
45 print('\nProper usage is', colored('`python -m mitoinstaller install`', 'green'), 'or', colored('`python -m mitoinstaller upgrade`', 'green'), '\n\nTry running the command ', colored('`python -m mitoinstaller install`', 'green'), '\n')
46
47
48 if __name__ == '__main__':
49 main()
50
[end of mitoinstaller/mitoinstaller/__main__.py]
[start of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
1 import importlib
2 import os
3 import sys
4
5 from mitoinstaller import __version__
6 from mitoinstaller.commands import upgrade_mito_installer
7 from mitoinstaller.installer_steps.installer_step import InstallerStep
8 from mitoinstaller.jupyter_utils import set_prefered_jupyter_env_variable
9 from mitoinstaller.log_utils import identify, log
10 from mitoinstaller.user_install import (USER_JSON_PATH, go_pro,
11 try_create_user_json_file)
12
13
14 def initial_install_step_create_user():
15
16 if not os.path.exists(USER_JSON_PATH):
17 try_create_user_json_file(is_pro=('--pro' in sys.argv))
18
19 if not ('--pro' in sys.argv):
20 # Only try and log if we're not pro
21 identify()
22 log('install_started', {
23 'mitoinstaller_version': __version__
24 })
25 else:
26 # If the user is going pro, make sure they are set to pro
27 go_pro()
28
29 def initial_install_step_add_env_for_which_jupyter():
30 """
31 This install steps checks, up front, which very of jupyter we should
32 launch: lab or notebook. It then stores this as an enviornment variable
33 so that the final installer steps can launch it.
34
35 We do this up front, so that we can see which packages that user has
36 installed before installing Mito.
37 """
38 set_prefered_jupyter_env_variable()
39
40
41 INITIAL_INSTALLER_STEPS = [
42 InstallerStep(
43 'Create mito user',
44 initial_install_step_create_user
45 ),
46 InstallerStep(
47 'Upgrade mitoinstaller',
48 upgrade_mito_installer,
49 optional=True
50 ),
51 InstallerStep(
52 'Setting up enviornment',
53 initial_install_step_add_env_for_which_jupyter,
54 ),
55 ]
56
[end of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitoinstaller/mitoinstaller/__main__.py b/mitoinstaller/mitoinstaller/__main__.py
--- a/mitoinstaller/mitoinstaller/__main__.py
+++ b/mitoinstaller/mitoinstaller/__main__.py
@@ -1,6 +1,6 @@
"""
The Mito Installer package contains utils for installing
-Mito within your Python enviornment.
+Mito within your Python environment.
Long term, we aim to meet:
1. This package has minimal dependencies, both for speed of download and the ultimate portability.
diff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
@@ -29,7 +29,7 @@
def initial_install_step_add_env_for_which_jupyter():
"""
This install steps checks, up front, which very of jupyter we should
- launch: lab or notebook. It then stores this as an enviornment variable
+ launch: lab or notebook. It then stores this as an environment variable
so that the final installer steps can launch it.
We do this up front, so that we can see which packages that user has
@@ -49,7 +49,7 @@
optional=True
),
InstallerStep(
- 'Setting up enviornment',
+ 'Setting up environment',
initial_install_step_add_env_for_which_jupyter,
),
]
| {"golden_diff": "diff --git a/mitoinstaller/mitoinstaller/__main__.py b/mitoinstaller/mitoinstaller/__main__.py\n--- a/mitoinstaller/mitoinstaller/__main__.py\n+++ b/mitoinstaller/mitoinstaller/__main__.py\n@@ -1,6 +1,6 @@\n \"\"\"\n The Mito Installer package contains utils for installing\n-Mito within your Python enviornment.\n+Mito within your Python environment.\n \n Long term, we aim to meet:\n 1. This package has minimal dependencies, both for speed of download and the ultimate portability.\ndiff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n@@ -29,7 +29,7 @@\n def initial_install_step_add_env_for_which_jupyter():\n \"\"\"\n This install steps checks, up front, which very of jupyter we should\n- launch: lab or notebook. It then stores this as an enviornment variable\n+ launch: lab or notebook. It then stores this as an environment variable\n so that the final installer steps can launch it. \n \n We do this up front, so that we can see which packages that user has \n@@ -49,7 +49,7 @@\n optional=True\n ),\n InstallerStep(\n- 'Setting up enviornment',\n+ 'Setting up environment',\n initial_install_step_add_env_for_which_jupyter,\n ),\n ]\n", "issue": "spelling mistake during mitoinstaller install\n**Describe the bug**\r\nSmall issue, very minor, found a spelling mistake when running mitoinstaller install, \r\n\r\nStarting install...\r\nCreate mito user\r\nUpgrade mitoinstaller\r\nSetting up **enviornment** <--- **environment**\r\nCheck dependencies\r\nRemove mitosheet3 if present\r\nInstall mitosheet\r\nThis might take a few moments...\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. run python -m mitoinstaller install\r\n\r\nPlease include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.\r\n\r\n**Expected behavior**\r\nshould be corrected to \"environment\"\r\n\r\n**Screenshots**\r\n\r\n\r\n**Desktop (please complete the following information):**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\"\nThe Mito Installer package contains utils for installing\nMito within your Python enviornment.\n\nLong term, we aim to meet:\n1. This package has minimal dependencies, both for speed of download and the ultimate portability.\n2. 
The installation attempts to fail as early as possible, and to give the user as much help\n help as possible while doing so.\n\"\"\"\nfrom colorama import init\nfrom termcolor import colored # type: ignore\n\nfrom mitoinstaller.install import do_install\n\n\ndef main() -> None:\n \"\"\"\n The main function of the Mito installer, this function is responsible\n for installing and upgrading the `mitosheet` package.\n\n To install Mito:\n python -m mitoinstaller install\n\n To upgrade Mito:\n python -m mitoinstaller upgrade\n\n To install Mito from TestPyPi\n python -m mitoinstaller install --test-pypi\n \"\"\"\n import sys\n init()\n\n if len(sys.argv) > 1:\n command = sys.argv[1]\n else:\n command = ''\n\n if command == 'install' or command == 'upgrade':\n do_install()\n elif command == 'uninstall':\n print('To uninstall, run,', colored('`pip uninstall mitosheet`', 'green'))\n else:\n # NOTE: we don't add upgrade_to_jupyterlab_3 to the help.\n # We only send this command to the users who need to know this (namely, those that need to upgrade)\n print('\\nProper usage is', colored('`python -m mitoinstaller install`', 'green'), 'or', colored('`python -m mitoinstaller upgrade`', 'green'), '\\n\\nTry running the command ', colored('`python -m mitoinstaller install`', 'green'), '\\n')\n \n\nif __name__ == '__main__':\n main()\n", "path": "mitoinstaller/mitoinstaller/__main__.py"}, {"content": "import importlib\nimport os\nimport sys\n\nfrom mitoinstaller import __version__\nfrom mitoinstaller.commands import upgrade_mito_installer\nfrom mitoinstaller.installer_steps.installer_step import InstallerStep\nfrom mitoinstaller.jupyter_utils import set_prefered_jupyter_env_variable\nfrom mitoinstaller.log_utils import identify, log\nfrom mitoinstaller.user_install import (USER_JSON_PATH, go_pro,\n try_create_user_json_file)\n\n\ndef initial_install_step_create_user():\n\n if not os.path.exists(USER_JSON_PATH):\n try_create_user_json_file(is_pro=('--pro' in sys.argv))\n\n if not ('--pro' in sys.argv):\n # Only try and log if we're not pro\n identify()\n log('install_started', {\n 'mitoinstaller_version': __version__\n })\n else:\n # If the user is going pro, make sure they are set to pro\n go_pro()\n\ndef initial_install_step_add_env_for_which_jupyter():\n \"\"\"\n This install steps checks, up front, which very of jupyter we should\n launch: lab or notebook. It then stores this as an enviornment variable\n so that the final installer steps can launch it. \n\n We do this up front, so that we can see which packages that user has \n installed before installing Mito.\n \"\"\"\n set_prefered_jupyter_env_variable()\n\n\nINITIAL_INSTALLER_STEPS = [\n InstallerStep(\n 'Create mito user',\n initial_install_step_create_user\n ),\n InstallerStep(\n 'Upgrade mitoinstaller',\n upgrade_mito_installer,\n optional=True\n ),\n InstallerStep(\n 'Setting up enviornment',\n initial_install_step_add_env_for_which_jupyter,\n ),\n]\n", "path": "mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py"}]} | 1,816 | 367 |
gh_patches_debug_1231 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Python 3.9
Python 3.9 will be released 2020-10-05.
Here are some steps before its release:
* Start testing with prerelease
After release:
* Ensure tests run with released version
* Add 3.9 PyPI classifier
* Enable Python wheel building in release
</issue>
<code>
[start of setup.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from setuptools import Extension, find_packages, setup
8
9 with open("README.md", "r") as fp:
10 long_description = fp.read()
11
12 packages = find_packages("src")
13 if sys.version_info < (3, 6):
14 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
15
16 compile_extensions = (
17 # Python 3+
18 sys.version_info >= (3,)
19 # Not Jython
20 and not sys.platform.startswith("java")
21 # Not PyPy
22 and "__pypy__" not in sys.builtin_module_names
23 # Not explicitly disabled
24 and (os.environ.get("SCOUT_DISABLE_EXTENSIONS", "") == "")
25 )
26 if compile_extensions:
27 ext_modules = [
28 Extension(
29 name=str("scout_apm.core._objtrace"),
30 sources=[str("src/scout_apm/core/_objtrace.c")],
31 optional=True,
32 )
33 ]
34 else:
35 ext_modules = []
36
37 setup(
38 name="scout_apm",
39 version="2.16.2",
40 description="Scout Application Performance Monitoring Agent",
41 long_description=long_description,
42 long_description_content_type="text/markdown",
43 url="https://github.com/scoutapp/scout_apm_python",
44 project_urls={
45 "Documentation": "https://docs.scoutapm.com/#python-agent",
46 "Changelog": (
47 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
48 ),
49 },
50 author="Scout",
51 author_email="[email protected]",
52 license="MIT",
53 zip_safe=False,
54 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
55 packages=packages,
56 package_dir={str(""): str("src")},
57 ext_modules=ext_modules,
58 entry_points={
59 "console_scripts": [
60 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
61 ]
62 },
63 install_requires=[
64 'asgiref ; python_version >= "3.5"',
65 'importlib-metadata ; python_version < "3.8"',
66 "psutil>=5,<6",
67 'urllib3[secure] < 1.25 ; python_version < "3.5"',
68 'urllib3[secure] < 2 ; python_version >= "3.5"',
69 "wrapt>=1.10,<2.0",
70 ],
71 keywords="apm performance monitoring development",
72 classifiers=[
73 "Development Status :: 5 - Production/Stable",
74 "Framework :: Bottle",
75 "Framework :: Django",
76 "Framework :: Django :: 1.8",
77 "Framework :: Django :: 1.9",
78 "Framework :: Django :: 1.10",
79 "Framework :: Django :: 1.11",
80 "Framework :: Django :: 2.0",
81 "Framework :: Django :: 2.1",
82 "Framework :: Django :: 2.2",
83 "Framework :: Django :: 3.0",
84 "Framework :: Django :: 3.1",
85 "Framework :: Flask",
86 "Framework :: Pyramid",
87 "Intended Audience :: Developers",
88 "Topic :: System :: Monitoring",
89 "License :: OSI Approved :: MIT License",
90 "Operating System :: MacOS",
91 "Operating System :: POSIX",
92 "Operating System :: POSIX :: Linux",
93 "Programming Language :: Python :: 2",
94 "Programming Language :: Python :: 2.7",
95 "Programming Language :: Python :: 3",
96 "Programming Language :: Python :: 3.4",
97 "Programming Language :: Python :: 3.5",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 "Programming Language :: Python :: 3.8",
101 ],
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -98,5 +98,6 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -98,5 +98,6 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n ],\n )\n", "issue": "Support Python 3.9\nPython 3.9 will be released 2020-10-05.\r\n\r\nHere are some steps before its release:\r\n\r\n* Start testing with prerelease\r\n\r\nAfter release:\r\n* Ensure tests run with released version\r\n* Add 3.9 PyPI classifier\r\n* Enable PYthon wheel building in release\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.16.2\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 
3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]} | 1,661 | 85 |
gh_patches_debug_34386 | rasdani/github-patches | git_diff | optuna__optuna-1678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use function annotation syntax for Type Hints.
After dropping Python 2.7 support at #710, we can define type hints with function annotation syntax.
~~Do you have a plan to update the coding style guideline?~~
https://github.com/optuna/optuna/wiki/Coding-Style-Conventions
## Progress
- [x] `optuna/integration/sklearn.py` (#1735)
- [x] `optuna/study.py` - assigned to harpy
## Note to the questioner
We still cannot use variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because we still support Python 3.5.
</issue>
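To make the migration concrete, a small sketch of the two styles (class names are illustrative; the `seed` parameter mirrors the sampler shown below):

```python
from typing import Optional


class CommentStyle:
    # PEP 484 type comments, needed while Python 2.7 was still supported.
    def __init__(self, seed=None):
        # type: (Optional[int]) -> None
        self._seed = seed


class AnnotationStyle:
    # Function annotation syntax, available on every supported interpreter (3.5+).
    # Variable annotations (PEP 526) stay off-limits because Python 3.5 lacks them.
    def __init__(self, seed: Optional[int] = None) -> None:
        self._seed = seed
```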
<code>
[start of optuna/samplers/_random.py]
1 import numpy
2
3 from optuna import distributions
4 from optuna.samplers import BaseSampler
5 from optuna import type_checking
6
7 if type_checking.TYPE_CHECKING:
8 from typing import Any # NOQA
9 from typing import Dict # NOQA
10 from typing import Optional # NOQA
11
12 from optuna.distributions import BaseDistribution # NOQA
13 from optuna.study import Study # NOQA
14 from optuna.trial import FrozenTrial # NOQA
15
16
17 class RandomSampler(BaseSampler):
18 """Sampler using random sampling.
19
20 This sampler is based on *independent sampling*.
21 See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.
22
23 Example:
24
25 .. testcode::
26
27 import optuna
28 from optuna.samplers import RandomSampler
29
30 def objective(trial):
31 x = trial.suggest_uniform('x', -5, 5)
32 return x**2
33
34 study = optuna.create_study(sampler=RandomSampler())
35 study.optimize(objective, n_trials=10)
36
37 Args:
38 seed: Seed for random number generator.
39 """
40
41 def __init__(self, seed=None):
42 # type: (Optional[int]) -> None
43
44 self._rng = numpy.random.RandomState(seed)
45
46 def reseed_rng(self) -> None:
47
48 self._rng = numpy.random.RandomState()
49
50 def infer_relative_search_space(self, study, trial):
51 # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
52
53 return {}
54
55 def sample_relative(self, study, trial, search_space):
56 # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
57
58 return {}
59
60 def sample_independent(self, study, trial, param_name, param_distribution):
61 # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any
62
63 if isinstance(param_distribution, distributions.UniformDistribution):
64 return self._rng.uniform(param_distribution.low, param_distribution.high)
65 elif isinstance(param_distribution, distributions.LogUniformDistribution):
66 log_low = numpy.log(param_distribution.low)
67 log_high = numpy.log(param_distribution.high)
68 return float(numpy.exp(self._rng.uniform(log_low, log_high)))
69 elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
70 q = param_distribution.q
71 r = param_distribution.high - param_distribution.low
72 # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
73 low = 0 - 0.5 * q
74 high = r + 0.5 * q
75 s = self._rng.uniform(low, high)
76 v = numpy.round(s / q) * q + param_distribution.low
77 # v may slightly exceed range due to round-off errors.
78 return float(min(max(v, param_distribution.low), param_distribution.high))
79 elif isinstance(param_distribution, distributions.IntUniformDistribution):
80 # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
81 r = (param_distribution.high - param_distribution.low) / param_distribution.step
82 # numpy.random.randint includes low but excludes high.
83 s = self._rng.randint(0, r + 1)
84 v = s * param_distribution.step + param_distribution.low
85 return int(v)
86 elif isinstance(param_distribution, distributions.IntLogUniformDistribution):
87 log_low = numpy.log(param_distribution.low - 0.5)
88 log_high = numpy.log(param_distribution.high + 0.5)
89 s = numpy.exp(self._rng.uniform(log_low, log_high))
90 v = numpy.round(s)
91 return int(min(max(v, param_distribution.low), param_distribution.high))
92 elif isinstance(param_distribution, distributions.CategoricalDistribution):
93 choices = param_distribution.choices
94 index = self._rng.randint(0, len(choices))
95 return choices[index]
96 else:
97 raise NotImplementedError
98
[end of optuna/samplers/_random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/samplers/_random.py b/optuna/samplers/_random.py
--- a/optuna/samplers/_random.py
+++ b/optuna/samplers/_random.py
@@ -1,17 +1,14 @@
+from typing import Any
+from typing import Dict
+from typing import Optional
+
import numpy
from optuna import distributions
+from optuna.distributions import BaseDistribution
from optuna.samplers import BaseSampler
-from optuna import type_checking
-
-if type_checking.TYPE_CHECKING:
- from typing import Any # NOQA
- from typing import Dict # NOQA
- from typing import Optional # NOQA
-
- from optuna.distributions import BaseDistribution # NOQA
- from optuna.study import Study # NOQA
- from optuna.trial import FrozenTrial # NOQA
+from optuna.study import Study
+from optuna.trial import FrozenTrial
class RandomSampler(BaseSampler):
@@ -38,8 +35,7 @@
seed: Seed for random number generator.
"""
- def __init__(self, seed=None):
- # type: (Optional[int]) -> None
+ def __init__(self, seed: Optional[int] = None) -> None:
self._rng = numpy.random.RandomState(seed)
@@ -47,18 +43,25 @@
self._rng = numpy.random.RandomState()
- def infer_relative_search_space(self, study, trial):
- # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
+ def infer_relative_search_space(
+ self, study: Study, trial: FrozenTrial
+ ) -> Dict[str, BaseDistribution]:
return {}
- def sample_relative(self, study, trial, search_space):
- # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
+ def sample_relative(
+ self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
+ ) -> Dict[str, Any]:
return {}
- def sample_independent(self, study, trial, param_name, param_distribution):
- # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any
+ def sample_independent(
+ self,
+ study: Study,
+ trial: FrozenTrial,
+ param_name: str,
+ param_distribution: distributions.BaseDistribution,
+ ) -> Any:
if isinstance(param_distribution, distributions.UniformDistribution):
return self._rng.uniform(param_distribution.low, param_distribution.high)
| {"golden_diff": "diff --git a/optuna/samplers/_random.py b/optuna/samplers/_random.py\n--- a/optuna/samplers/_random.py\n+++ b/optuna/samplers/_random.py\n@@ -1,17 +1,14 @@\n+from typing import Any\n+from typing import Dict\n+from typing import Optional\n+\n import numpy\n \n from optuna import distributions\n+from optuna.distributions import BaseDistribution\n from optuna.samplers import BaseSampler\n-from optuna import type_checking\n-\n-if type_checking.TYPE_CHECKING:\n- from typing import Any # NOQA\n- from typing import Dict # NOQA\n- from typing import Optional # NOQA\n-\n- from optuna.distributions import BaseDistribution # NOQA\n- from optuna.study import Study # NOQA\n- from optuna.trial import FrozenTrial # NOQA\n+from optuna.study import Study\n+from optuna.trial import FrozenTrial\n \n \n class RandomSampler(BaseSampler):\n@@ -38,8 +35,7 @@\n seed: Seed for random number generator.\n \"\"\"\n \n- def __init__(self, seed=None):\n- # type: (Optional[int]) -> None\n+ def __init__(self, seed: Optional[int] = None) -> None:\n \n self._rng = numpy.random.RandomState(seed)\n \n@@ -47,18 +43,25 @@\n \n self._rng = numpy.random.RandomState()\n \n- def infer_relative_search_space(self, study, trial):\n- # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n+ def infer_relative_search_space(\n+ self, study: Study, trial: FrozenTrial\n+ ) -> Dict[str, BaseDistribution]:\n \n return {}\n \n- def sample_relative(self, study, trial, search_space):\n- # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n+ def sample_relative(\n+ self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n+ ) -> Dict[str, Any]:\n \n return {}\n \n- def sample_independent(self, study, trial, param_name, param_distribution):\n- # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any\n+ def sample_independent(\n+ self,\n+ study: Study,\n+ trial: FrozenTrial,\n+ param_name: str,\n+ param_distribution: distributions.BaseDistribution,\n+ ) -> Any:\n \n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._rng.uniform(param_distribution.low, param_distribution.high)\n", "issue": "Use function annotation syntax for Type Hints.\nAfter dropping Python 2.7 support at #710, we can define type hints with function annotation syntax. \r\n~~Do you have a plan to update the coding style guideline?~~\r\nhttps://github.com/optuna/optuna/wiki/Coding-Style-Conventions\r\n\r\n## Progress\r\n\r\n- [x] `optuna/integration/sklearn.py` (#1735)\r\n- [x] `optuna/study.py` - assigned to harpy\r\n\r\n## Note to the questioner\r\n\r\nWe still cannot use variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because we supports Python 3.5.\n", "before_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers import BaseSampler\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.study import Study # NOQA\n from optuna.trial import FrozenTrial # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n This sampler is based on *independent sampling*.\n See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n from optuna.samplers import RandomSampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -5, 5)\n return x**2\n\n study = optuna.create_study(sampler=RandomSampler())\n study.optimize(objective, n_trials=10)\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self._rng = numpy.random.RandomState(seed)\n\n def reseed_rng(self) -> None:\n\n self._rng = numpy.random.RandomState()\n\n def infer_relative_search_space(self, study, trial):\n # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self._rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self._rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n r = (param_distribution.high - param_distribution.low) / param_distribution.step\n # numpy.random.randint includes low but excludes high.\n s = self._rng.randint(0, r + 1)\n v = s * param_distribution.step + param_distribution.low\n return int(v)\n elif isinstance(param_distribution, distributions.IntLogUniformDistribution):\n log_low = numpy.log(param_distribution.low - 0.5)\n log_high = numpy.log(param_distribution.high + 0.5)\n s = numpy.exp(self._rng.uniform(log_low, log_high))\n v = numpy.round(s)\n return int(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n index = self._rng.randint(0, len(choices))\n return choices[index]\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/_random.py"}]} | 1,727 | 584 |
gh_patches_debug_37113 | rasdani/github-patches | git_diff | sublimelsp__LSP-472 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LS always starts in first folder of workspace
LSP always starts a language server in the first project of your workspace, regardless of which one you're working on. For example, with the following workspace:

When I open any Rust files in `bserver`, RLS is still started in `LSP`, since it appears first in the list. This causes RLS to throw a warning:

and effectively breaks all useful functionality of the LSP plugin--nothing works, because RLS is staring at the wrong directory.
I'm still digging as to why this is, but it looks like the issue is [an oversight with branching right here](https://github.com/tomv564/LSP/blob/master/plugin/core/workspace.py#L16). I'll submit a PR shortly.
</issue>
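A rough sketch of the behaviour the reporter expects, preferring the workspace folder that actually contains the active file rather than always taking the first folder. Names are illustrative and this is not the plugin's actual code:

```python
import os
from typing import List, Optional


def project_path_for_file(folders: List[str], file_name: Optional[str]) -> Optional[str]:
    # Prefer the open folder that contains the file, if any.
    if file_name:
        for folder in folders:
            if file_name.startswith(folder.rstrip(os.sep) + os.sep):
                return folder
        # The file lives outside every open folder: fall back to its own directory.
        return os.path.dirname(file_name)
    # Nothing to go by: fall back to the first folder, as the current code does.
    return folders[0] if folders else None
```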
<code>
[start of plugin/core/workspace.py]
1 import os
2 try:
3 from typing import List, Optional, Any
4 assert List and Optional and Any
5 except ImportError:
6 pass
7
8 from .logging import debug
9 # from .types import WindowLike
10
11
12 def get_project_path(window: 'Any') -> 'Optional[str]':
13 """
14 Returns the first project folder or the parent folder of the active view
15 """
16 if len(window.folders()):
17 folder_paths = window.folders()
18 return folder_paths[0]
19 else:
20 view = window.active_view()
21 if view:
22 filename = view.file_name()
23 if filename:
24 project_path = os.path.dirname(filename)
25 debug("Couldn't determine project directory since no folders are open!",
26 "Using", project_path, "as a fallback.")
27 return project_path
28 else:
29 debug("Couldn't determine project directory since no folders are open",
30 "and the current file isn't saved on the disk.")
31 return None
32 else:
33 debug("No view is active in current window")
34 return None # https://github.com/tomv564/LSP/issues/219
35
36
37 def get_common_parent(paths: 'List[str]') -> str:
38 """
39 Get the common parent directory of multiple paths.
40
41 Python 3.5+ includes os.path.commonpath which does this, however Sublime
42 currently embeds Python 3.3.
43 """
44 return os.path.commonprefix([path + '/' for path in paths]).rstrip('/')
45
46
47 def is_in_workspace(window: 'Any', file_path: str) -> bool:
48 workspace_path = get_project_path(window)
49 if workspace_path is None:
50 return False
51
52 common_dir = get_common_parent([workspace_path, file_path])
53 return workspace_path == common_dir
54
55
56 def enable_in_project(window, config_name: str) -> None:
57 project_data = window.project_data()
58 if isinstance(project_data, dict):
59 project_settings = project_data.setdefault('settings', dict())
60 project_lsp_settings = project_settings.setdefault('LSP', dict())
61 project_client_settings = project_lsp_settings.setdefault(config_name, dict())
62 project_client_settings['enabled'] = True
63 window.set_project_data(project_data)
64 else:
65 debug('non-dict returned in project_settings: ', project_data)
66
67
68 def disable_in_project(window, config_name: str) -> None:
69 project_data = window.project_data()
70 if isinstance(project_data, dict):
71 project_settings = project_data.setdefault('settings', dict())
72 project_lsp_settings = project_settings.setdefault('LSP', dict())
73 project_client_settings = project_lsp_settings.setdefault(config_name, dict())
74 project_client_settings['enabled'] = False
75 window.set_project_data(project_data)
76 else:
77 debug('non-dict returned in project_settings: ', project_data)
78
79
80 def get_project_config(window: 'Any') -> dict:
81 project_data = window.project_data() or dict()
82 if isinstance(project_data, dict):
83 project_settings = project_data.setdefault('settings', dict())
84 project_lsp_settings = project_settings.setdefault('LSP', dict())
85 return project_lsp_settings
86 else:
87 debug('non-dict returned in project_settings: ', project_data)
88 return dict()
89
[end of plugin/core/workspace.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/workspace.py b/plugin/core/workspace.py
--- a/plugin/core/workspace.py
+++ b/plugin/core/workspace.py
@@ -1,37 +1,69 @@
import os
try:
- from typing import List, Optional, Any
- assert List and Optional and Any
+ from typing import List, Optional, Any, Iterable
+ assert List and Optional and Any and Iterable
except ImportError:
pass
from .logging import debug
-# from .types import WindowLike
+from .types import ViewLike
+
+
+def get_filename_from_view(view: ViewLike) -> 'Optional[str]':
+ if not view:
+ debug("No view is active in current window")
+ return None # https://github.com/tomv564/LSP/issues/219
+ filename = view.file_name()
+ if not filename:
+ debug("Couldn't determine project directory since no folders are open",
+ "and the current file isn't saved on the disk.")
+ return filename
+
+
+def get_directory_name(view: ViewLike) -> 'Optional[str]':
+ filename = get_filename_from_view(view)
+ if filename:
+ project_path = os.path.dirname(filename)
+ return project_path
+ return None
+
+
+def find_path_among_multi_folders(folders: 'Iterable[str]',
+ view: ViewLike) -> 'Optional[str]':
+ filename = get_filename_from_view(view)
+ if not filename:
+ return None
+ folders = [os.path.realpath(f) for f in folders]
+ file = view.file_name()
+ if not file:
+ return None
+ file = os.path.realpath(file)
+ while file not in folders:
+ file = os.path.dirname(file)
+ if os.path.dirname(file) == file:
+ # We're at the root of the filesystem.
+ file = None
+ break
+ debug('project path is', file)
+ return file
def get_project_path(window: 'Any') -> 'Optional[str]':
"""
- Returns the first project folder or the parent folder of the active view
+ Returns the project folder or the parent folder of the active view
"""
- if len(window.folders()):
+ if not window:
+ return None
+ num_folders = len(window.folders())
+ if num_folders == 0:
+ return get_directory_name(window.active_view())
+ elif num_folders == 1:
folder_paths = window.folders()
return folder_paths[0]
- else:
- view = window.active_view()
- if view:
- filename = view.file_name()
- if filename:
- project_path = os.path.dirname(filename)
- debug("Couldn't determine project directory since no folders are open!",
- "Using", project_path, "as a fallback.")
- return project_path
- else:
- debug("Couldn't determine project directory since no folders are open",
- "and the current file isn't saved on the disk.")
- return None
- else:
- debug("No view is active in current window")
- return None # https://github.com/tomv564/LSP/issues/219
+ else: # num_folders > 1
+ return find_path_among_multi_folders(
+ window.folders(),
+ window.active_view())
def get_common_parent(paths: 'List[str]') -> str:
| {"golden_diff": "diff --git a/plugin/core/workspace.py b/plugin/core/workspace.py\n--- a/plugin/core/workspace.py\n+++ b/plugin/core/workspace.py\n@@ -1,37 +1,69 @@\n import os\n try:\n- from typing import List, Optional, Any\n- assert List and Optional and Any\n+ from typing import List, Optional, Any, Iterable\n+ assert List and Optional and Any and Iterable\n except ImportError:\n pass\n \n from .logging import debug\n-# from .types import WindowLike\n+from .types import ViewLike\n+\n+\n+def get_filename_from_view(view: ViewLike) -> 'Optional[str]':\n+ if not view:\n+ debug(\"No view is active in current window\")\n+ return None # https://github.com/tomv564/LSP/issues/219\n+ filename = view.file_name()\n+ if not filename:\n+ debug(\"Couldn't determine project directory since no folders are open\",\n+ \"and the current file isn't saved on the disk.\")\n+ return filename\n+\n+\n+def get_directory_name(view: ViewLike) -> 'Optional[str]':\n+ filename = get_filename_from_view(view)\n+ if filename:\n+ project_path = os.path.dirname(filename)\n+ return project_path\n+ return None\n+\n+\n+def find_path_among_multi_folders(folders: 'Iterable[str]',\n+ view: ViewLike) -> 'Optional[str]':\n+ filename = get_filename_from_view(view)\n+ if not filename:\n+ return None\n+ folders = [os.path.realpath(f) for f in folders]\n+ file = view.file_name()\n+ if not file:\n+ return None\n+ file = os.path.realpath(file)\n+ while file not in folders:\n+ file = os.path.dirname(file)\n+ if os.path.dirname(file) == file:\n+ # We're at the root of the filesystem.\n+ file = None\n+ break\n+ debug('project path is', file)\n+ return file\n \n \n def get_project_path(window: 'Any') -> 'Optional[str]':\n \"\"\"\n- Returns the first project folder or the parent folder of the active view\n+ Returns the project folder or the parent folder of the active view\n \"\"\"\n- if len(window.folders()):\n+ if not window:\n+ return None\n+ num_folders = len(window.folders())\n+ if num_folders == 0:\n+ return get_directory_name(window.active_view())\n+ elif num_folders == 1:\n folder_paths = window.folders()\n return folder_paths[0]\n- else:\n- view = window.active_view()\n- if view:\n- filename = view.file_name()\n- if filename:\n- project_path = os.path.dirname(filename)\n- debug(\"Couldn't determine project directory since no folders are open!\",\n- \"Using\", project_path, \"as a fallback.\")\n- return project_path\n- else:\n- debug(\"Couldn't determine project directory since no folders are open\",\n- \"and the current file isn't saved on the disk.\")\n- return None\n- else:\n- debug(\"No view is active in current window\")\n- return None # https://github.com/tomv564/LSP/issues/219\n+ else: # num_folders > 1\n+ return find_path_among_multi_folders(\n+ window.folders(),\n+ window.active_view())\n \n \n def get_common_parent(paths: 'List[str]') -> str:\n", "issue": "LS always starts in first folder of workspace\nLSP always starts a language server in the first project of your workspace, regardless of which one you're working on. For example, with the following workspace:\r\n\r\n\r\n\r\nWhen I open any Rust files in `bserver`, RLS is still started in `LSP`, since it appears first in the list. 
This causes RLS to throw a warning:\r\n\r\n\r\n\r\nand effectively breaks all useful functionality of the LSP plugin--nothing works, because RLS is staring at the wrong directory.\r\n\r\nI'm still digging as to why this is, but it looks like the issue is [an oversight with branching right here](https://github.com/tomv564/LSP/blob/master/plugin/core/workspace.py#L16). I'll submit a PR shortly.\n", "before_files": [{"content": "import os\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nfrom .logging import debug\n# from .types import WindowLike\n\n\ndef get_project_path(window: 'Any') -> 'Optional[str]':\n \"\"\"\n Returns the first project folder or the parent folder of the active view\n \"\"\"\n if len(window.folders()):\n folder_paths = window.folders()\n return folder_paths[0]\n else:\n view = window.active_view()\n if view:\n filename = view.file_name()\n if filename:\n project_path = os.path.dirname(filename)\n debug(\"Couldn't determine project directory since no folders are open!\",\n \"Using\", project_path, \"as a fallback.\")\n return project_path\n else:\n debug(\"Couldn't determine project directory since no folders are open\",\n \"and the current file isn't saved on the disk.\")\n return None\n else:\n debug(\"No view is active in current window\")\n return None # https://github.com/tomv564/LSP/issues/219\n\n\ndef get_common_parent(paths: 'List[str]') -> str:\n \"\"\"\n Get the common parent directory of multiple paths.\n\n Python 3.5+ includes os.path.commonpath which does this, however Sublime\n currently embeds Python 3.3.\n \"\"\"\n return os.path.commonprefix([path + '/' for path in paths]).rstrip('/')\n\n\ndef is_in_workspace(window: 'Any', file_path: str) -> bool:\n workspace_path = get_project_path(window)\n if workspace_path is None:\n return False\n\n common_dir = get_common_parent([workspace_path, file_path])\n return workspace_path == common_dir\n\n\ndef enable_in_project(window, config_name: str) -> None:\n project_data = window.project_data()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n project_client_settings = project_lsp_settings.setdefault(config_name, dict())\n project_client_settings['enabled'] = True\n window.set_project_data(project_data)\n else:\n debug('non-dict returned in project_settings: ', project_data)\n\n\ndef disable_in_project(window, config_name: str) -> None:\n project_data = window.project_data()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n project_client_settings = project_lsp_settings.setdefault(config_name, dict())\n project_client_settings['enabled'] = False\n window.set_project_data(project_data)\n else:\n debug('non-dict returned in project_settings: ', project_data)\n\n\ndef get_project_config(window: 'Any') -> dict:\n project_data = window.project_data() or dict()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n return project_lsp_settings\n else:\n debug('non-dict returned in project_settings: ', project_data)\n return dict()\n", "path": "plugin/core/workspace.py"}]} | 1,706 | 765 |
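For readers skimming this record, a rough standalone sketch of the folder-matching idea used in the patch above: walk up from the open file's path until it lands in one of the window's folders. The helper name and sample paths below are illustrative only; the real change lives in `plugin/core/workspace.py`.

```python
import os

def find_containing_folder(folders, file_path):
    """Return the workspace folder that contains file_path, or None."""
    folders = [os.path.realpath(f) for f in folders]
    current = os.path.realpath(file_path)
    while current not in folders:
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root without a match
            return None
        current = parent
    return current

# With a two-folder workspace like the one in the screenshot, the Rust file
# resolves to its own project instead of the first folder in the list.
print(find_containing_folder(["/home/me/LSP", "/home/me/bserver"],
                             "/home/me/bserver/src/main.rs"))
```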
gh_patches_debug_13955 | rasdani/github-patches | git_diff | saleor__saleor-3337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The fetch vat rates button should not be a GET method
### What I'm trying to achieve
Not to allow GET methods to fetch vat rates.
### Steps to reproduce the problem
1. Go to configuration -> Taxes;
2. The fetch tax rates button is a GET button.
### What I expected to happen
Get a POST instead of a GET, which is safer against attacks.
### Describe a proposed solution
Drop the button link on the dashboard for a submit button or a modal.
</issue>
<code>
[start of saleor/dashboard/taxes/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.auth.decorators import permission_required
6 from django.core.exceptions import ImproperlyConfigured
7 from django.core.management import call_command
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.utils.translation import pgettext_lazy
11 from django_countries.fields import Country
12 from django_prices_vatlayer.models import VAT
13
14 from ...core import TaxRateType
15 from ...core.utils import get_paginator_items
16 from ...core.utils.taxes import get_taxes_for_country
17 from ...dashboard.taxes.filters import TaxFilter
18 from ...dashboard.taxes.forms import TaxesConfigurationForm
19 from ...dashboard.views import staff_member_required
20
21 logger = logging.getLogger(__name__)
22
23
24 @staff_member_required
25 def tax_list(request):
26 taxes = VAT.objects.order_by('country_code')
27 tax_filter = TaxFilter(request.GET, queryset=taxes)
28 taxes = get_paginator_items(
29 tax_filter.qs, settings.DASHBOARD_PAGINATE_BY, request.GET.get('page'))
30 ctx = {
31 'taxes': taxes, 'filter_set': tax_filter,
32 'is_empty': not tax_filter.queryset.exists()}
33 return TemplateResponse(request, 'dashboard/taxes/list.html', ctx)
34
35
36 @staff_member_required
37 def tax_details(request, country_code):
38 tax = get_object_or_404(VAT, country_code=country_code)
39 tax_rates = get_taxes_for_country(Country(country_code))
40 translations = dict(TaxRateType.CHOICES)
41 tax_rates = [
42 (translations.get(rate_name, rate_name), tax['value'])
43 for rate_name, tax in tax_rates.items()]
44 ctx = {'tax': tax, 'tax_rates': sorted(tax_rates)}
45 return TemplateResponse(request, 'dashboard/taxes/details.html', ctx)
46
47
48 @staff_member_required
49 @permission_required('site.manage_settings')
50 def configure_taxes(request):
51 site_settings = request.site.settings
52 taxes_form = TaxesConfigurationForm(
53 request.POST or None, instance=site_settings)
54 if taxes_form.is_valid():
55 taxes_form.save()
56 msg = pgettext_lazy('Dashboard message', 'Updated taxes settings')
57 messages.success(request, msg)
58 return redirect('dashboard:taxes')
59 ctx = {'site': site_settings, 'taxes_form': taxes_form}
60 return TemplateResponse(request, 'dashboard/taxes/form.html', ctx)
61
62
63 @staff_member_required
64 @permission_required('site.manage_settings')
65 def fetch_tax_rates(request):
66 try:
67 call_command('get_vat_rates')
68 msg = pgettext_lazy(
69 'Dashboard message', 'Tax rates updated successfully')
70 messages.success(request, msg)
71 except ImproperlyConfigured as exc:
72 logger.exception(exc)
73 msg = pgettext_lazy(
74 'Dashboard message',
75 'Could not fetch tax rates. '
76 'Make sure you have supplied a valid API Access Key.<br/>'
77 'Check the server logs for more information about this error.')
78 messages.warning(request, msg)
79 return redirect('dashboard:taxes')
80
[end of saleor/dashboard/taxes/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/dashboard/taxes/views.py b/saleor/dashboard/taxes/views.py
--- a/saleor/dashboard/taxes/views.py
+++ b/saleor/dashboard/taxes/views.py
@@ -8,6 +8,7 @@
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
+from django.views.decorators.http import require_POST
from django_countries.fields import Country
from django_prices_vatlayer.models import VAT
@@ -61,6 +62,7 @@
@staff_member_required
+@require_POST
@permission_required('site.manage_settings')
def fetch_tax_rates(request):
try:
| {"golden_diff": "diff --git a/saleor/dashboard/taxes/views.py b/saleor/dashboard/taxes/views.py\n--- a/saleor/dashboard/taxes/views.py\n+++ b/saleor/dashboard/taxes/views.py\n@@ -8,6 +8,7 @@\n from django.shortcuts import get_object_or_404, redirect\n from django.template.response import TemplateResponse\n from django.utils.translation import pgettext_lazy\n+from django.views.decorators.http import require_POST\n from django_countries.fields import Country\n from django_prices_vatlayer.models import VAT\n \n@@ -61,6 +62,7 @@\n \n \n @staff_member_required\n+@require_POST\n @permission_required('site.manage_settings')\n def fetch_tax_rates(request):\n try:\n", "issue": "The fetch vat rates button should not be a GET method\n### What I'm trying to achieve\r\nNot to allow GET methods to fetch vat rates.\r\n\r\n### Steps to reproduce the problem\r\n1. Go to configuration -> Taxes ;\r\n2. The fetch tax rates button, is a GET button.\r\n\r\n### What I expected to happen\r\nGet a POST instead of a GET, which is safer against attacks.\r\n\r\n### Describe a proposed solution\r\nDrop the button link on the dashboard for a submit button or a modal.\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.management import call_command\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import pgettext_lazy\nfrom django_countries.fields import Country\nfrom django_prices_vatlayer.models import VAT\n\nfrom ...core import TaxRateType\nfrom ...core.utils import get_paginator_items\nfrom ...core.utils.taxes import get_taxes_for_country\nfrom ...dashboard.taxes.filters import TaxFilter\nfrom ...dashboard.taxes.forms import TaxesConfigurationForm\nfrom ...dashboard.views import staff_member_required\n\nlogger = logging.getLogger(__name__)\n\n\n@staff_member_required\ndef tax_list(request):\n taxes = VAT.objects.order_by('country_code')\n tax_filter = TaxFilter(request.GET, queryset=taxes)\n taxes = get_paginator_items(\n tax_filter.qs, settings.DASHBOARD_PAGINATE_BY, request.GET.get('page'))\n ctx = {\n 'taxes': taxes, 'filter_set': tax_filter,\n 'is_empty': not tax_filter.queryset.exists()}\n return TemplateResponse(request, 'dashboard/taxes/list.html', ctx)\n\n\n@staff_member_required\ndef tax_details(request, country_code):\n tax = get_object_or_404(VAT, country_code=country_code)\n tax_rates = get_taxes_for_country(Country(country_code))\n translations = dict(TaxRateType.CHOICES)\n tax_rates = [\n (translations.get(rate_name, rate_name), tax['value'])\n for rate_name, tax in tax_rates.items()]\n ctx = {'tax': tax, 'tax_rates': sorted(tax_rates)}\n return TemplateResponse(request, 'dashboard/taxes/details.html', ctx)\n\n\n@staff_member_required\n@permission_required('site.manage_settings')\ndef configure_taxes(request):\n site_settings = request.site.settings\n taxes_form = TaxesConfigurationForm(\n request.POST or None, instance=site_settings)\n if taxes_form.is_valid():\n taxes_form.save()\n msg = pgettext_lazy('Dashboard message', 'Updated taxes settings')\n messages.success(request, msg)\n return redirect('dashboard:taxes')\n ctx = {'site': site_settings, 'taxes_form': taxes_form}\n return TemplateResponse(request, 'dashboard/taxes/form.html', 
ctx)\n\n\n@staff_member_required\n@permission_required('site.manage_settings')\ndef fetch_tax_rates(request):\n try:\n call_command('get_vat_rates')\n msg = pgettext_lazy(\n 'Dashboard message', 'Tax rates updated successfully')\n messages.success(request, msg)\n except ImproperlyConfigured as exc:\n logger.exception(exc)\n msg = pgettext_lazy(\n 'Dashboard message',\n 'Could not fetch tax rates. '\n 'Make sure you have supplied a valid API Access Key.<br/>'\n 'Check the server logs for more information about this error.')\n messages.warning(request, msg)\n return redirect('dashboard:taxes')\n", "path": "saleor/dashboard/taxes/views.py"}]} | 1,450 | 153 |
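As a side note on the fix above, a minimal self-contained sketch of what `require_POST` buys you, assuming only that Django is installed (this is a toy view, not Saleor's dashboard code): GET requests now receive a 405 instead of triggering the fetch.

```python
import django
from django.conf import settings
from django.http import HttpResponse
from django.test import RequestFactory
from django.views.decorators.http import require_POST

settings.configure()  # minimal settings, just enough for this demo
django.setup()

@require_POST
def fetch_tax_rates(request):
    # In the real view this would call the management command.
    return HttpResponse("tax rates refreshed")

factory = RequestFactory()
print(fetch_tax_rates(factory.get("/taxes/fetch/")).status_code)   # 405
print(fetch_tax_rates(factory.post("/taxes/fetch/")).status_code)  # 200
```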
gh_patches_debug_1474 | rasdani/github-patches | git_diff | ray-project__ray-9429 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[rllib] MARWIL tuned cartpole example (and my own experiments) produce nan rewards only.
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### What is the problem? + Reproduction
I have a custom example that produces offline data and picks it up with MARWIL for training. I observed that I get `nan` reward values for my example every time, so I went a step back and used your cartpole example:
https://github.com/ray-project/ray/blob/cd5a207d69cdaf05b47d956c18e89d928585eec7/rllib/tuned_examples/marwil/cartpole-marwil.yaml
I'm following the exact steps there, i.e. first run
```
./train.py --run=PPO --env=CartPole-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
```
followed by
```
rllib train -f cartpole-marwil.yaml
```
I did this both on my currently preferred stable version `0.8.5` and on the `0.9.0.dev0` wheel. The result is this:
```
== Status ==
Memory usage on this node: 19.4/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/12 CPUs, 0/0 GPUs, 0.0/9.96 GiB heap, 0.0/3.42 GiB objects
Result logdir: /Users/maxpumperla/ray_results/cartpole-marwil
Number of trials: 2 (2 TERMINATED)
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
| Trial name | status | loc | beta | iter | total time (s) | ts | reward |
|--------------------------------+------------+-------+--------+--------+------------------+--------+----------|
| MARWIL_CartPole-v0_7af06_00000 | TERMINATED | | 0 | 2206 | 58.5661 | 500007 | nan |
| MARWIL_CartPole-v0_7af06_00001 | TERMINATED | | 1 | 2248 | 58.6117 | 500286 | nan |
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
```
Also, I've noticed that your MARWIL unit test is a pure smoke test and doesn't check reward values, but I didn't run that locally. Maybe it produces nan values as well.
In any case I'd appreciate any input here, as we'd love to use MARWIL for our "real" use case, in which we see the same behaviour.
</issue>
<code>
[start of rllib/examples/custom_loss.py]
1 """Example of using custom_loss() with an imitation learning loss.
2
3 The default input file is too small to learn a good policy, but you can
4 generate new experiences for IL training as follows:
5
6 To generate experiences:
7 $ ./train.py --run=PG --config='{"output": "/tmp/cartpole"}' --env=CartPole-v0
8
9 To train on experiences with joint PG + IL loss:
10 $ python custom_loss.py --input-files=/tmp/cartpole
11 """
12
13 import argparse
14 from pathlib import Path
15 import os
16
17 import ray
18 from ray import tune
19 from ray.rllib.examples.models.custom_loss_model import CustomLossModel, \
20 TorchCustomLossModel
21 from ray.rllib.models import ModelCatalog
22 from ray.rllib.utils.framework import try_import_tf
23
24 tf1, tf, tfv = try_import_tf()
25
26 parser = argparse.ArgumentParser()
27 parser.add_argument("--torch", action="store_true")
28 parser.add_argument("--stop-iters", type=int, default=200)
29 parser.add_argument(
30 "--input-files",
31 type=str,
32 default=os.path.join(
33 os.path.dirname(os.path.abspath(__file__)),
34 "../tests/data/cartpole_small"))
35
36 if __name__ == "__main__":
37 ray.init()
38 args = parser.parse_args()
39
40 # Bazel makes it hard to find files specified in `args` (and `data`).
41 # Look for them here.
42 if not os.path.exists(args.input_files):
43 # This script runs in the ray/rllib/examples dir.
44 rllib_dir = Path(__file__).parent.parent
45 input_dir = rllib_dir.absolute().joinpath(args.input_files)
46 args.input_files = str(input_dir)
47
48 ModelCatalog.register_custom_model(
49 "custom_loss", TorchCustomLossModel if args.torch else CustomLossModel)
50
51 config = {
52 "env": "CartPole-v0",
53 "num_workers": 0,
54 "model": {
55 "custom_model": "custom_loss",
56 "custom_model_config": {
57 "input_files": args.input_files,
58 },
59 },
60 "framework": "torch" if args.torch else "tf",
61 }
62
63 stop = {
64 "training_iteration": args.stop_iters,
65 }
66
67 tune.run("PG", config=config, stop=stop)
68
[end of rllib/examples/custom_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rllib/examples/custom_loss.py b/rllib/examples/custom_loss.py
--- a/rllib/examples/custom_loss.py
+++ b/rllib/examples/custom_loss.py
@@ -31,7 +31,7 @@
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- "../tests/data/cartpole_small"))
+ "../tests/data/cartpole/small"))
if __name__ == "__main__":
ray.init()
| {"golden_diff": "diff --git a/rllib/examples/custom_loss.py b/rllib/examples/custom_loss.py\n--- a/rllib/examples/custom_loss.py\n+++ b/rllib/examples/custom_loss.py\n@@ -31,7 +31,7 @@\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n- \"../tests/data/cartpole_small\"))\n+ \"../tests/data/cartpole/small\"))\n \n if __name__ == \"__main__\":\n ray.init()\n", "issue": "[rllib] MARWIL tuned cartpole example (and my own experiments) produce nan rewards only.\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem? + Reproduction\r\n\r\nI have a custom example that produces offline data and picks it up with MARWIL for training. I observed that I get `nan` reward values for my example every time, so I went a step back and used your cartpole example:\r\n\r\nhttps://github.com/ray-project/ray/blob/cd5a207d69cdaf05b47d956c18e89d928585eec7/rllib/tuned_examples/marwil/cartpole-marwil.yaml\r\n\r\nI'm following the exact steps there, i.e. first run \r\n\r\n```\r\n./train.py --run=PPO --env=CartPole-v0 \\\r\n --stop='{\"timesteps_total\": 50000}' \\\r\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\r\n```\r\n\r\nfollowed by \r\n\r\n```\r\nrllib train -f cartpole-marwil.yaml\r\n```\r\n\r\nI did this both on my currently preferred stable version `0.8.5`, as well as on the `0.9.0.dev0` wheel. The result is this:\r\n\r\n```\r\n== Status ==\r\nMemory usage on this node: 19.4/32.0 GiB\r\nUsing FIFO scheduling algorithm.\r\nResources requested: 0/12 CPUs, 0/0 GPUs, 0.0/9.96 GiB heap, 0.0/3.42 GiB objects\r\nResult logdir: /Users/maxpumperla/ray_results/cartpole-marwil\r\nNumber of trials: 2 (2 TERMINATED)\r\n+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+\r\n| Trial name | status | loc | beta | iter | total time (s) | ts | reward |\r\n|--------------------------------+------------+-------+--------+--------+------------------+--------+----------|\r\n| MARWIL_CartPole-v0_7af06_00000 | TERMINATED | | 0 | 2206 | 58.5661 | 500007 | nan |\r\n| MARWIL_CartPole-v0_7af06_00001 | TERMINATED | | 1 | 2248 | 58.6117 | 500286 | nan |\r\n+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+\r\n```\r\n\r\nAlso, I've noticed that your MARWIL unit test is a pure smoke test and doesn't check reward values, but I didn't run that locally. 
Maybe it produces nan values as well.\r\n\r\nIn any case I'd appreciate any input here, as we'd love to use MARWIL for our \"real\" use case, in which we see the same behaviour.\n", "before_files": [{"content": "\"\"\"Example of using custom_loss() with an imitation learning loss.\n\nThe default input file is too small to learn a good policy, but you can\ngenerate new experiences for IL training as follows:\n\nTo generate experiences:\n$ ./train.py --run=PG --config='{\"output\": \"/tmp/cartpole\"}' --env=CartPole-v0\n\nTo train on experiences with joint PG + IL loss:\n$ python custom_loss.py --input-files=/tmp/cartpole\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.models.custom_loss_model import CustomLossModel, \\\n TorchCustomLossModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf1, tf, tfv = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\n \"--input-files\",\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../tests/data/cartpole_small\"))\n\nif __name__ == \"__main__\":\n ray.init()\n args = parser.parse_args()\n\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n if not os.path.exists(args.input_files):\n # This script runs in the ray/rllib/examples dir.\n rllib_dir = Path(__file__).parent.parent\n input_dir = rllib_dir.absolute().joinpath(args.input_files)\n args.input_files = str(input_dir)\n\n ModelCatalog.register_custom_model(\n \"custom_loss\", TorchCustomLossModel if args.torch else CustomLossModel)\n\n config = {\n \"env\": \"CartPole-v0\",\n \"num_workers\": 0,\n \"model\": {\n \"custom_model\": \"custom_loss\",\n \"custom_model_config\": {\n \"input_files\": args.input_files,\n },\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n\n stop = {\n \"training_iteration\": args.stop_iters,\n }\n\n tune.run(\"PG\", config=config, stop=stop)\n", "path": "rllib/examples/custom_loss.py"}]} | 1,823 | 101 |
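The golden diff here is only a data-path rename (`cartpole_small` to `cartpole/small`), so the one thing worth sketching is how the example script resolves that default. The snippet below reproduces the argparse and `Path` fallback logic in isolation with the corrected path; it is a standalone toy, not part of RLlib.

```python
import argparse
import os
from pathlib import Path

parser = argparse.ArgumentParser()
parser.add_argument(
    "--input-files",
    type=str,
    default=os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "../tests/data/cartpole/small"))
args = parser.parse_args([])

# Mirror the script's fallback: if the default path is missing (e.g. under
# Bazel), look for it relative to the repository root instead.
if not os.path.exists(args.input_files):
    rllib_dir = Path(__file__).parent.parent
    args.input_files = str(rllib_dir.absolute().joinpath("tests/data/cartpole/small"))

print(args.input_files)
```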
gh_patches_debug_57313 | rasdani/github-patches | git_diff | vllm-project__vllm-3129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v0.3.3] Release Tracker
**ETA**: Feb 29th - Mar 1st
## Major changes
* StarCoder2 support
* Performance optimization and LoRA support for Gemma
* Performance optimization for MoE kernel
* 2/3/8-bit GPTQ support
* [Experimental] AWS Inferentia2 support
## PRs to be merged before the release
- [x] #2330 #2223
- [ ] ~~#2761~~
- [x] #2819
- [x] #3087 #3099
- [x] #3089
</issue>
<code>
[start of vllm/__init__.py]
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.3.2"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
[end of vllm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.3.2"
+__version__ = "0.3.3"
__all__ = [
"LLM",
| {"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.3.2\"\n+__version__ = \"0.3.3\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.3.3] Release Tracker\n**ETA**: Feb 29th - Mar 1st\r\n\r\n## Major changes\r\n\r\n* StarCoder2 support\r\n* Performance optimization and LoRA support for Gemma\r\n* Performance optimization for MoE kernel\r\n* 2/3/8-bit GPTQ support\r\n* [Experimental] AWS Inferentia2 support\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] #2330 #2223\r\n- [ ] ~~#2761~~\r\n- [x] #2819 \r\n- [x] #3087 #3099\r\n- [x] #3089 \n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.3.2\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]} | 905 | 109 |
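Since this record only bumps the package version, the only practical follow-up is confirming which release is installed. One way, assuming the package is present in the environment:

```python
from importlib.metadata import version

# Prints the installed vLLM release; after the bump above this should be 0.3.3.
# (vllm.__version__ reports the same string, but importing the package pulls in
# its heavy runtime dependencies, so the metadata lookup is cheaper.)
print(version("vllm"))
```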
gh_patches_debug_33139 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-2535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Baggage span processor - key predicate
This issue is to track adding a method of selecting what baggage key entries should be copied.
Feedback in the JS contrib PR was to allow a user-provided predicate function. This puts the responsibility on the user to ensure sensitive baggage keys are not copied while also not prescribing how that is determined.
- https://github.com/open-telemetry/opentelemetry-js-contrib/issues/2166
We had similar feedback in the .NET contrib project, but thought it was more complicated than just using a set of prefixes, so we created an issue to continue the discussion. The plain processor that copies all baggage entries (like using `*` in your example) is likely to be accepted first.
- https://github.com/open-telemetry/opentelemetry-dotnet-contrib/issues/1695
</issue>
<code>
[start of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional
16
17 from opentelemetry.baggage import get_all as get_all_baggage
18 from opentelemetry.context import Context
19 from opentelemetry.sdk.trace.export import SpanProcessor
20 from opentelemetry.trace import Span
21
22
23 class BaggageSpanProcessor(SpanProcessor):
24 """
25 The BaggageSpanProcessor reads entries stored in Baggage
26 from the parent context and adds the baggage entries' keys and
27 values to the span as attributes on span start.
28
29 Add this span processor to a tracer provider.
30
31 Keys and values added to Baggage will appear on subsequent child
32 spans for a trace within this service *and* be propagated to external
33 services in accordance with any configured propagation formats
34 configured. If the external services also have a Baggage span
35 processor, the keys and values will appear in those child spans as
36 well.
37
38 ⚠ Warning ⚠️
39
40 Do not put sensitive information in Baggage.
41
42 To repeat: a consequence of adding data to Baggage is that the keys and
43 values will appear in all outgoing HTTP headers from the application.
44
45 """
46
47 def __init__(self) -> None:
48 pass
49
50 def on_start(
51 self, span: "Span", parent_context: Optional[Context] = None
52 ) -> None:
53 baggage = get_all_baggage(parent_context)
54 for key, value in baggage.items():
55 span.set_attribute(key, value)
56
[end of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py]
[start of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=import-error
16
17 from .processor import BaggageSpanProcessor
18 from .version import __version__
19
20 __all__ = ["BaggageSpanProcessor", "__version__"]
21
[end of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
@@ -14,7 +14,7 @@
# pylint: disable=import-error
-from .processor import BaggageSpanProcessor
+from .processor import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor
from .version import __version__
-__all__ = ["BaggageSpanProcessor", "__version__"]
+__all__ = ["ALLOW_ALL_BAGGAGE_KEYS", "BaggageSpanProcessor", "__version__"]
diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
@@ -12,13 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Optional
+from typing import Callable, Optional
from opentelemetry.baggage import get_all as get_all_baggage
from opentelemetry.context import Context
from opentelemetry.sdk.trace.export import SpanProcessor
from opentelemetry.trace import Span
+# A BaggageKeyPredicate is a function that takes a baggage key and returns a boolean
+BaggageKeyPredicateT = Callable[[str], bool]
+
+# A BaggageKeyPredicate that always returns True, allowing all baggage keys to be added to spans
+ALLOW_ALL_BAGGAGE_KEYS: BaggageKeyPredicateT = lambda _: True
+
class BaggageSpanProcessor(SpanProcessor):
"""
@@ -44,12 +50,13 @@
"""
- def __init__(self) -> None:
- pass
+ def __init__(self, baggage_key_predicate: BaggageKeyPredicateT) -> None:
+ self._baggage_key_predicate = baggage_key_predicate
def on_start(
self, span: "Span", parent_context: Optional[Context] = None
) -> None:
baggage = get_all_baggage(parent_context)
for key, value in baggage.items():
- span.set_attribute(key, value)
+ if self._baggage_key_predicate(key):
+ span.set_attribute(key, value)
| {"golden_diff": "diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n@@ -14,7 +14,7 @@\n \n # pylint: disable=import-error\n \n-from .processor import BaggageSpanProcessor\n+from .processor import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor\n from .version import __version__\n \n-__all__ = [\"BaggageSpanProcessor\", \"__version__\"]\n+__all__ = [\"ALLOW_ALL_BAGGAGE_KEYS\", \"BaggageSpanProcessor\", \"__version__\"]\ndiff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n@@ -12,13 +12,19 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import Optional\n+from typing import Callable, Optional\n \n from opentelemetry.baggage import get_all as get_all_baggage\n from opentelemetry.context import Context\n from opentelemetry.sdk.trace.export import SpanProcessor\n from opentelemetry.trace import Span\n \n+# A BaggageKeyPredicate is a function that takes a baggage key and returns a boolean\n+BaggageKeyPredicateT = Callable[[str], bool]\n+\n+# A BaggageKeyPredicate that always returns True, allowing all baggage keys to be added to spans\n+ALLOW_ALL_BAGGAGE_KEYS: BaggageKeyPredicateT = lambda _: True\n+\n \n class BaggageSpanProcessor(SpanProcessor):\n \"\"\"\n@@ -44,12 +50,13 @@\n \n \"\"\"\n \n- def __init__(self) -> None:\n- pass\n+ def __init__(self, baggage_key_predicate: BaggageKeyPredicateT) -> None:\n+ self._baggage_key_predicate = baggage_key_predicate\n \n def on_start(\n self, span: \"Span\", parent_context: Optional[Context] = None\n ) -> None:\n baggage = get_all_baggage(parent_context)\n for key, value in baggage.items():\n- span.set_attribute(key, value)\n+ if self._baggage_key_predicate(key):\n+ span.set_attribute(key, value)\n", "issue": "Baggage span processor - key predicate\nThis issue is to track adding a method of selecting what baggage key entries should be copied.\r\n\r\nFeedback in the JS contrib PR was to allow a user-provided predicate function. This puts the responsibility on the user to ensure sensitive baggage keys are not copied while also not prescribing how that is determined.\r\n- https://github.com/open-telemetry/opentelemetry-js-contrib/issues/2166\r\n\r\n\r\nWe had a similar feedback in the .NET contrib project but thought it was more complicated than just using a set of prefixes so created an issue to continue the discussion. 
The plain processor that copies all baggage entries (like using `*` in your example) is likely to be accepted first.\r\n- https://github.com/open-telemetry/opentelemetry-dotnet-contrib/issues/1695\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nfrom opentelemetry.baggage import get_all as get_all_baggage\nfrom opentelemetry.context import Context\nfrom opentelemetry.sdk.trace.export import SpanProcessor\nfrom opentelemetry.trace import Span\n\n\nclass BaggageSpanProcessor(SpanProcessor):\n \"\"\"\n The BaggageSpanProcessor reads entries stored in Baggage\n from the parent context and adds the baggage entries' keys and\n values to the span as attributes on span start.\n\n Add this span processor to a tracer provider.\n\n Keys and values added to Baggage will appear on subsequent child\n spans for a trace within this service *and* be propagated to external\n services in accordance with any configured propagation formats\n configured. If the external services also have a Baggage span\n processor, the keys and values will appear in those child spans as\n well.\n\n \u26a0 Warning \u26a0\ufe0f\n\n Do not put sensitive information in Baggage.\n\n To repeat: a consequence of adding data to Baggage is that the keys and\n values will appear in all outgoing HTTP headers from the application.\n\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def on_start(\n self, span: \"Span\", parent_context: Optional[Context] = None\n ) -> None:\n baggage = get_all_baggage(parent_context)\n for key, value in baggage.items():\n span.set_attribute(key, value)\n", "path": "processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=import-error\n\nfrom .processor import BaggageSpanProcessor\nfrom .version import __version__\n\n__all__ = [\"BaggageSpanProcessor\", \"__version__\"]\n", "path": "processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py"}]} | 1,530 | 618 |
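To make the new constructor argument concrete, here is a hypothetical wiring of the processor from the patch above into an SDK tracer provider. The `myapp.` prefix is only an example of the kind of allow-list predicate the issue asks for.

```python
from opentelemetry.processor.baggage import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor
from opentelemetry.sdk.trace import TracerProvider

provider = TracerProvider()

# Permissive mode: copy every baggage entry onto started spans.
provider.add_span_processor(BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS))

# Restrictive mode: only copy keys the application explicitly owns,
# so sensitive baggage never ends up as span attributes.
provider.add_span_processor(BaggageSpanProcessor(lambda key: key.startswith("myapp.")))
```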
gh_patches_debug_19161 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5810 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dm - use national sites
Is it possible to use the national sites for dm stores instead of the German one? The format is `dm.[country code]` for all countries except for Bulgaria, Bosnia and Italy (which use `dm-drogeriemarkt.[country code]`) and Slovakia (`mojadm.sk`).
</issue>
<code>
[start of locations/spiders/dm.py]
1 import scrapy
2
3 from locations.categories import Categories, apply_category
4 from locations.dict_parser import DictParser
5 from locations.hours import DAYS, OpeningHours
6
7
8 class DmSpider(scrapy.Spider):
9 name = "dm"
10 item_attributes = {"brand": "dm", "brand_wikidata": "Q266572"}
11 allowed_domains = ["store-data-service.services.dmtech.com"]
12 start_urls = ["https://store-data-service.services.dmtech.com/stores/bbox/89.999,-179.999,-89.999,179.999"]
13
14 @staticmethod
15 def parse_hours(store_hours: [dict]) -> OpeningHours:
16 opening_hours = OpeningHours()
17
18 for store_day in store_hours:
19 for times in store_day["timeRanges"]:
20 open_time = times["opening"]
21 close_time = times["closing"]
22
23 opening_hours.add_range(DAYS[store_day["weekDay"] - 1], open_time, close_time)
24
25 return opening_hours
26
27 def parse(self, response, **kwargs):
28 for location in response.json()["stores"]:
29 location["address"]["street_address"] = location["address"].pop("street")
30 location["address"]["country"] = location["countryCode"]
31 location["name"] = location["address"].get("name")
32 item = DictParser.parse(location)
33 item["website"] = f'https://www.dm.de/store{location["storeUrlPath"]}'
34 item["extras"]["check_date"] = location["updateTimeStamp"]
35 item["opening_hours"] = self.parse_hours(location["openingHours"])
36
37 apply_category(Categories.SHOP_CHEMIST, item)
38
39 yield item
40
[end of locations/spiders/dm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/dm.py b/locations/spiders/dm.py
--- a/locations/spiders/dm.py
+++ b/locations/spiders/dm.py
@@ -30,7 +30,14 @@
location["address"]["country"] = location["countryCode"]
location["name"] = location["address"].get("name")
item = DictParser.parse(location)
- item["website"] = f'https://www.dm.de/store{location["storeUrlPath"]}'
+ if location["countryCode"] in ["BG", "BA", "IT"]:
+ item[
+ "website"
+ ] = f'https://www.dm-drogeriemarkt.{location["countryCode"].lower()}/store{location["storeUrlPath"]}'
+ elif location["countryCode"] == "SK":
+ item["website"] = f'https://www.mojadm.sk/store{location["storeUrlPath"]}'
+ else:
+ item["website"] = f'https://www.dm.{location["countryCode"].lower()}/store{location["storeUrlPath"]}'
item["extras"]["check_date"] = location["updateTimeStamp"]
item["opening_hours"] = self.parse_hours(location["openingHours"])
| {"golden_diff": "diff --git a/locations/spiders/dm.py b/locations/spiders/dm.py\n--- a/locations/spiders/dm.py\n+++ b/locations/spiders/dm.py\n@@ -30,7 +30,14 @@\n location[\"address\"][\"country\"] = location[\"countryCode\"]\n location[\"name\"] = location[\"address\"].get(\"name\")\n item = DictParser.parse(location)\n- item[\"website\"] = f'https://www.dm.de/store{location[\"storeUrlPath\"]}'\n+ if location[\"countryCode\"] in [\"BG\", \"BA\", \"IT\"]:\n+ item[\n+ \"website\"\n+ ] = f'https://www.dm-drogeriemarkt.{location[\"countryCode\"].lower()}/store{location[\"storeUrlPath\"]}'\n+ elif location[\"countryCode\"] == \"SK\":\n+ item[\"website\"] = f'https://www.mojadm.sk/store{location[\"storeUrlPath\"]}'\n+ else:\n+ item[\"website\"] = f'https://www.dm.{location[\"countryCode\"].lower()}/store{location[\"storeUrlPath\"]}'\n item[\"extras\"][\"check_date\"] = location[\"updateTimeStamp\"]\n item[\"opening_hours\"] = self.parse_hours(location[\"openingHours\"])\n", "issue": "dm - use national sites\nIs it possible to use the national sites for dm stores instead of the German one? The format is `dm.[country code]` for all countries except for Bulgaria, Bosnia and Italy (which use `dm-drogeriemarkt.[country code]`) and Slovakia (`mojadm.sk`).\n", "before_files": [{"content": "import scrapy\n\nfrom locations.categories import Categories, apply_category\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\n\n\nclass DmSpider(scrapy.Spider):\n name = \"dm\"\n item_attributes = {\"brand\": \"dm\", \"brand_wikidata\": \"Q266572\"}\n allowed_domains = [\"store-data-service.services.dmtech.com\"]\n start_urls = [\"https://store-data-service.services.dmtech.com/stores/bbox/89.999,-179.999,-89.999,179.999\"]\n\n @staticmethod\n def parse_hours(store_hours: [dict]) -> OpeningHours:\n opening_hours = OpeningHours()\n\n for store_day in store_hours:\n for times in store_day[\"timeRanges\"]:\n open_time = times[\"opening\"]\n close_time = times[\"closing\"]\n\n opening_hours.add_range(DAYS[store_day[\"weekDay\"] - 1], open_time, close_time)\n\n return opening_hours\n\n def parse(self, response, **kwargs):\n for location in response.json()[\"stores\"]:\n location[\"address\"][\"street_address\"] = location[\"address\"].pop(\"street\")\n location[\"address\"][\"country\"] = location[\"countryCode\"]\n location[\"name\"] = location[\"address\"].get(\"name\")\n item = DictParser.parse(location)\n item[\"website\"] = f'https://www.dm.de/store{location[\"storeUrlPath\"]}'\n item[\"extras\"][\"check_date\"] = location[\"updateTimeStamp\"]\n item[\"opening_hours\"] = self.parse_hours(location[\"openingHours\"])\n\n apply_category(Categories.SHOP_CHEMIST, item)\n\n yield item\n", "path": "locations/spiders/dm.py"}]} | 1,045 | 270 |
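Restated outside Scrapy, the country-to-domain rule from the issue and the patch above boils down to a small pure function (the helper name and sample inputs are illustrative, not part of the spider):

```python
def dm_website(country_code: str, store_url_path: str) -> str:
    cc = country_code.lower()
    if country_code in ("BG", "BA", "IT"):
        return f"https://www.dm-drogeriemarkt.{cc}/store{store_url_path}"
    if country_code == "SK":
        return f"https://www.mojadm.sk/store{store_url_path}"
    return f"https://www.dm.{cc}/store{store_url_path}"

print(dm_website("DE", "/de/store-1"))  # https://www.dm.de/store/de/store-1
print(dm_website("IT", "/it/store-2"))  # https://www.dm-drogeriemarkt.it/store/it/store-2
print(dm_website("SK", "/sk/store-3"))  # https://www.mojadm.sk/store/sk/store-3
```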
gh_patches_debug_7973 | rasdani/github-patches | git_diff | celery__celery-5870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Continuous memory leak
There is a memory leak in the parent process of Celery's worker.
It is not in a child process executing a task.
It happens suddenly every few days.
Unless you stop Celery, it consumes the server's memory within tens of hours.
This problem happens at least in Celery 4.1, and it also occurs in Celery 4.2.
Celery is running on Ubuntu 16 and the broker is RabbitMQ.

</issue>
<code>
[start of celery/events/receiver.py]
1 """Event receiver implementation."""
2 from __future__ import absolute_import, unicode_literals
3
4 import time
5 from operator import itemgetter
6
7 from kombu import Queue
8 from kombu.connection import maybe_channel
9 from kombu.mixins import ConsumerMixin
10
11 from celery import uuid
12 from celery.app import app_or_default
13 from celery.utils.time import adjust_timestamp
14
15 from .event import get_exchange
16
17 __all__ = ('EventReceiver',)
18
19 CLIENT_CLOCK_SKEW = -1
20
21 _TZGETTER = itemgetter('utcoffset', 'timestamp')
22
23
24 class EventReceiver(ConsumerMixin):
25 """Capture events.
26
27 Arguments:
28 connection (kombu.Connection): Connection to the broker.
29 handlers (Mapping[Callable]): Event handlers.
30 This is a map of event type names and their handlers.
31 The special handler `"*"` captures all events that don't have a
32 handler.
33 """
34
35 app = None
36
37 def __init__(self, channel, handlers=None, routing_key='#',
38 node_id=None, app=None, queue_prefix=None,
39 accept=None, queue_ttl=None, queue_expires=None):
40 self.app = app_or_default(app or self.app)
41 self.channel = maybe_channel(channel)
42 self.handlers = {} if handlers is None else handlers
43 self.routing_key = routing_key
44 self.node_id = node_id or uuid()
45 self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix
46 self.exchange = get_exchange(
47 self.connection or self.app.connection_for_write(),
48 name=self.app.conf.event_exchange)
49 if queue_ttl is None:
50 queue_ttl = self.app.conf.event_queue_ttl
51 if queue_expires is None:
52 queue_expires = self.app.conf.event_queue_expires
53 self.queue = Queue(
54 '.'.join([self.queue_prefix, self.node_id]),
55 exchange=self.exchange,
56 routing_key=self.routing_key,
57 auto_delete=True, durable=False,
58 message_ttl=queue_ttl,
59 expires=queue_expires,
60 )
61 self.clock = self.app.clock
62 self.adjust_clock = self.clock.adjust
63 self.forward_clock = self.clock.forward
64 if accept is None:
65 accept = {self.app.conf.event_serializer, 'json'}
66 self.accept = accept
67
68 def process(self, type, event):
69 """Process event by dispatching to configured handler."""
70 handler = self.handlers.get(type) or self.handlers.get('*')
71 handler and handler(event)
72
73 def get_consumers(self, Consumer, channel):
74 return [Consumer(queues=[self.queue],
75 callbacks=[self._receive], no_ack=True,
76 accept=self.accept)]
77
78 def on_consume_ready(self, connection, channel, consumers,
79 wakeup=True, **kwargs):
80 if wakeup:
81 self.wakeup_workers(channel=channel)
82
83 def itercapture(self, limit=None, timeout=None, wakeup=True):
84 return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)
85
86 def capture(self, limit=None, timeout=None, wakeup=True):
87 """Open up a consumer capturing events.
88
89 This has to run in the main process, and it will never stop
90 unless :attr:`EventDispatcher.should_stop` is set to True, or
91 forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
92 """
93 return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
94
95 def wakeup_workers(self, channel=None):
96 self.app.control.broadcast('heartbeat',
97 connection=self.connection,
98 channel=channel)
99
100 def event_from_message(self, body, localize=True,
101 now=time.time, tzfields=_TZGETTER,
102 adjust_timestamp=adjust_timestamp,
103 CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
104 type = body['type']
105 if type == 'task-sent':
106 # clients never sync so cannot use their clock value
107 _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
108 self.adjust_clock(_c)
109 else:
110 try:
111 clock = body['clock']
112 except KeyError:
113 body['clock'] = self.forward_clock()
114 else:
115 self.adjust_clock(clock)
116
117 if localize:
118 try:
119 offset, timestamp = tzfields(body)
120 except KeyError:
121 pass
122 else:
123 body['timestamp'] = adjust_timestamp(timestamp, offset)
124 body['local_received'] = now()
125 return type, body
126
127 def _receive(self, body, message, list=list, isinstance=isinstance):
128 if isinstance(body, list): # celery 4.0: List of events
129 process, from_message = self.process, self.event_from_message
130 [process(*from_message(event)) for event in body]
131 else:
132 self.process(*self.event_from_message(body))
133
134 @property
135 def connection(self):
136 return self.channel.connection.client if self.channel else None
137
[end of celery/events/receiver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/events/receiver.py b/celery/events/receiver.py
--- a/celery/events/receiver.py
+++ b/celery/events/receiver.py
@@ -90,7 +90,8 @@
unless :attr:`EventDispatcher.should_stop` is set to True, or
forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
"""
- return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
+ for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup):
+ pass
def wakeup_workers(self, channel=None):
self.app.control.broadcast('heartbeat',
| {"golden_diff": "diff --git a/celery/events/receiver.py b/celery/events/receiver.py\n--- a/celery/events/receiver.py\n+++ b/celery/events/receiver.py\n@@ -90,7 +90,8 @@\n unless :attr:`EventDispatcher.should_stop` is set to True, or\n forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.\n \"\"\"\n- return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))\n+ for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup):\n+ pass\n \n def wakeup_workers(self, channel=None):\n self.app.control.broadcast('heartbeat',\n", "issue": "Continuous memory leak\nThere is a memory leak in the parent process of Celery's worker.\nIt is not a child process executing a task.\nIt happens suddenly every few days.\nUnless you stop Celery, it consumes server memory in tens of hours.\n\nThis problem happens at least in Celery 4.1, and it also occurs in Celery 4.2.\nCelery is running on Ubuntu 16 and brokers use RabbitMQ.\n\n\n\n\n", "before_files": [{"content": "\"\"\"Event receiver implementation.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport time\nfrom operator import itemgetter\n\nfrom kombu import Queue\nfrom kombu.connection import maybe_channel\nfrom kombu.mixins import ConsumerMixin\n\nfrom celery import uuid\nfrom celery.app import app_or_default\nfrom celery.utils.time import adjust_timestamp\n\nfrom .event import get_exchange\n\n__all__ = ('EventReceiver',)\n\nCLIENT_CLOCK_SKEW = -1\n\n_TZGETTER = itemgetter('utcoffset', 'timestamp')\n\n\nclass EventReceiver(ConsumerMixin):\n \"\"\"Capture events.\n\n Arguments:\n connection (kombu.Connection): Connection to the broker.\n handlers (Mapping[Callable]): Event handlers.\n This is a map of event type names and their handlers.\n The special handler `\"*\"` captures all events that don't have a\n handler.\n \"\"\"\n\n app = None\n\n def __init__(self, channel, handlers=None, routing_key='#',\n node_id=None, app=None, queue_prefix=None,\n accept=None, queue_ttl=None, queue_expires=None):\n self.app = app_or_default(app or self.app)\n self.channel = maybe_channel(channel)\n self.handlers = {} if handlers is None else handlers\n self.routing_key = routing_key\n self.node_id = node_id or uuid()\n self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix\n self.exchange = get_exchange(\n self.connection or self.app.connection_for_write(),\n name=self.app.conf.event_exchange)\n if queue_ttl is None:\n queue_ttl = self.app.conf.event_queue_ttl\n if queue_expires is None:\n queue_expires = self.app.conf.event_queue_expires\n self.queue = Queue(\n '.'.join([self.queue_prefix, self.node_id]),\n exchange=self.exchange,\n routing_key=self.routing_key,\n auto_delete=True, durable=False,\n message_ttl=queue_ttl,\n expires=queue_expires,\n )\n self.clock = self.app.clock\n self.adjust_clock = self.clock.adjust\n self.forward_clock = self.clock.forward\n if accept is None:\n accept = {self.app.conf.event_serializer, 'json'}\n self.accept = accept\n\n def process(self, type, event):\n \"\"\"Process event by dispatching to configured handler.\"\"\"\n handler = self.handlers.get(type) or self.handlers.get('*')\n handler and handler(event)\n\n def get_consumers(self, Consumer, channel):\n return [Consumer(queues=[self.queue],\n callbacks=[self._receive], no_ack=True,\n accept=self.accept)]\n\n def on_consume_ready(self, connection, channel, consumers,\n wakeup=True, **kwargs):\n if wakeup:\n self.wakeup_workers(channel=channel)\n\n def itercapture(self, limit=None, timeout=None, wakeup=True):\n return 
self.consume(limit=limit, timeout=timeout, wakeup=wakeup)\n\n def capture(self, limit=None, timeout=None, wakeup=True):\n \"\"\"Open up a consumer capturing events.\n\n This has to run in the main process, and it will never stop\n unless :attr:`EventDispatcher.should_stop` is set to True, or\n forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.\n \"\"\"\n return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))\n\n def wakeup_workers(self, channel=None):\n self.app.control.broadcast('heartbeat',\n connection=self.connection,\n channel=channel)\n\n def event_from_message(self, body, localize=True,\n now=time.time, tzfields=_TZGETTER,\n adjust_timestamp=adjust_timestamp,\n CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):\n type = body['type']\n if type == 'task-sent':\n # clients never sync so cannot use their clock value\n _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW\n self.adjust_clock(_c)\n else:\n try:\n clock = body['clock']\n except KeyError:\n body['clock'] = self.forward_clock()\n else:\n self.adjust_clock(clock)\n\n if localize:\n try:\n offset, timestamp = tzfields(body)\n except KeyError:\n pass\n else:\n body['timestamp'] = adjust_timestamp(timestamp, offset)\n body['local_received'] = now()\n return type, body\n\n def _receive(self, body, message, list=list, isinstance=isinstance):\n if isinstance(body, list): # celery 4.0: List of events\n process, from_message = self.process, self.event_from_message\n [process(*from_message(event)) for event in body]\n else:\n self.process(*self.event_from_message(body))\n\n @property\n def connection(self):\n return self.channel.connection.client if self.channel else None\n", "path": "celery/events/receiver.py"}]} | 2,024 | 146 |
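The accepted Celery fix above replaces `list(self.consume(...))` with a loop that discards each event. Below is a minimal sketch of why that matters for a long-running capture; it is illustrative only and is not the actual Celery event loop.

```python
# Illustrative only: a generator standing in for EventReceiver.consume().
def consume(n):
    for _ in range(n):
        yield bytearray(1024)  # pretend this is one received event

# Pattern removed by the fix: every yielded event stays referenced by the
# list for as long as capture() runs, so memory grows with the event count.
events = list(consume(10_000))   # roughly 10 MB held alive

# Pattern introduced by the fix: each event becomes garbage right after its
# iteration, so memory stays flat no matter how many events arrive.
for _ in consume(10_000):
    pass
```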
gh_patches_debug_762 | rasdani/github-patches | git_diff | kubeflow__pipelines-2610 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kfp 0.1.35 tar.gz in pypi.org is missing diagnose_me directory
**What happened:**
The 0.1.35 release of kfp available on pypi.org (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the cli. The release hosted on github contains these files.
This is the tar.gz file hosted on pypi: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz
If you try to install and run kfp 0.1.35 via pip it causes an error:
```
Traceback (most recent call last):
File "/Users/shenderson/venvs/kubeflow/bin/kfp", line 5, in <module>
from kfp.__main__ import main
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module>
from .cli.cli import main
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py", line 21, in <module>
from .diagnose_me_cli import diagnose_me
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py", line 6, in <module>
from .diagnose_me import dev_env
ModuleNotFoundError: No module named 'kfp.cli.diagnose_me'
```
**What did you expect to happen:**
All kfp modules including the diagnose_me package to be installed.
**What steps did you take:**
* Run `pip3 install --upgrade --force --no-cache-dir kfp`
* Run `kfp`
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17 from setuptools import setup
18
19 NAME = 'kfp'
20 #VERSION = .... Change the version in kfp/__init__.py
21
22 REQUIRES = [
23 'urllib3>=1.15,<1.25', #Fixing the version conflict with the "requests" package
24 'six >= 1.10',
25 'certifi',
26 'python-dateutil',
27 'PyYAML',
28 'google-cloud-storage>=1.13.0',
29 'kubernetes>=8.0.0, <=9.0.0',
30 'PyJWT>=1.6.4',
31 'cryptography>=2.4.2',
32 'google-auth>=1.6.1',
33 'requests_toolbelt>=0.8.0',
34 'cloudpickle==1.1.1',
35 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api.
36 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1
37 'jsonschema >= 3.0.1',
38 'tabulate == 0.8.3',
39 'click == 7.0',
40 'Deprecated',
41 ]
42
43 def find_version(*file_path_parts):
44 here = os.path.abspath(os.path.dirname(__file__))
45 with open(os.path.join(here, *file_path_parts), 'r') as fp:
46 version_file_text = fp.read()
47
48 version_match = re.search(
49 r"^__version__ = ['\"]([^'\"]*)['\"]",
50 version_file_text,
51 re.M,
52 )
53 if version_match:
54 return version_match.group(1)
55
56 raise RuntimeError("Unable to find version string.")
57
58 setup(
59 name=NAME,
60 version=find_version("kfp", "__init__.py"),
61 description='KubeFlow Pipelines SDK',
62 author='google',
63 install_requires=REQUIRES,
64 packages=[
65 'kfp',
66 'kfp.cli',
67 'kfp.compiler',
68 'kfp.components',
69 'kfp.components.structures',
70 'kfp.components.structures.kubernetes',
71 'kfp.containers',
72 'kfp.dsl',
73 'kfp.notebook',
74 ],
75 classifiers=[
76 'Intended Audience :: Developers',
77 'Intended Audience :: Education',
78 'Intended Audience :: Science/Research',
79 'License :: OSI Approved :: Apache Software License',
80 'Programming Language :: Python :: 3',
81 'Programming Language :: Python :: 3.5',
82 'Programming Language :: Python :: 3.6',
83 'Programming Language :: Python :: 3.7',
84 'Topic :: Scientific/Engineering',
85 'Topic :: Scientific/Engineering :: Artificial Intelligence',
86 'Topic :: Software Development',
87 'Topic :: Software Development :: Libraries',
88 'Topic :: Software Development :: Libraries :: Python Modules',
89 ],
90 python_requires='>=3.5.3',
91 include_package_data=True,
92 entry_points={
93 'console_scripts': [
94 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'
95 ]
96 })
97
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -64,6 +64,7 @@
packages=[
'kfp',
'kfp.cli',
+ 'kfp.cli.diagnose_me',
'kfp.compiler',
'kfp.components',
'kfp.components.structures',
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -64,6 +64,7 @@\n packages=[\n 'kfp',\n 'kfp.cli',\n+ 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n", "issue": "kfp 0.1.35 tar.gz in pypi.org is missing diagnose_me directory\n**What happened:**\r\nThe 0.1.35 release of kfp available on pypi.org (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the cli. The release hosted on github contains these files.\r\n\r\nThis is the tar.gz file hosted on pypi: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz\r\n\r\nIf you try to install and run kfp 0.1.35 via pip it causes an error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/shenderson/venvs/kubeflow/bin/kfp\", line 5, in <module>\r\n from kfp.__main__ import main\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py\", line 15, in <module>\r\n from .cli.cli import main\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py\", line 21, in <module>\r\n from .diagnose_me_cli import diagnose_me\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py\", line 6, in <module>\r\n from .diagnose_me import dev_env\r\nModuleNotFoundError: No module named 'kfp.cli.diagnose_me'\r\n```\r\n\r\n**What did you expect to happen:**\r\nAll kfp modules including the diagnose_me package to be installed.\r\n\r\n**What steps did you take:**\r\n* Run `pip3 install --upgrade --force --no-cache-dir kfp`\r\n* Run `kfp`\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle==1.1.1',\n 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. 
Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0',\n 'Deprecated',\n]\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n name=NAME,\n version=find_version(\"kfp\", \"__init__.py\"),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]} | 2,037 | 86 |
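On the kfp entry above: the sdist was missing `kfp/cli/diagnose_me` because `setup.py` enumerates subpackages by hand, and the accepted patch adds the one missing name. The file already imports `find_namespace_packages`, so an alternative, sketched below with an assumed include pattern rather than the project's actual choice, is to let setuptools discover subpackages and avoid this class of omission.

```python
# Hypothetical variant of the packaging call; only the two arguments shown
# are meaningful here, the rest of the real setup() call would be unchanged.
from setuptools import find_namespace_packages, setup

setup(
    name="kfp",
    packages=find_namespace_packages(include=["kfp", "kfp.*"]),
)
```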
gh_patches_debug_41258 | rasdani/github-patches | git_diff | streamlink__streamlink-5774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.artetv: error: Unable to validate response text: ValidationError(dict):
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.5.0
### Description
I fixed this issue
```
by adding '**API_HLS_NG**' in line 51 of file
`/usr/lib/python3.11/site-packages/streamlink/plugins/artetv.py`
like this :
```

link/streamlink/assets/19744191/b78f47ba-67b2-439b-b336-85bef7e4615a)
### Debug log
```text
error: Unable to validate response text: ValidationError(dict):
Unable to validate value of key 'data'
Context(dict):
Unable to validate value of key 'attributes'
Context(dict):
Unable to validate value of key 'streams'
Context(AnySchema):
ValidationError(AnySchema):
ValidationError(AnySchema):
ValidationError(dict):
Unable to validate value of key 'protocol'
Context(AnySchema):
ValidationError(equality):
'API_HLS_NG' does not equal 'HLS'
ValidationError(equality):
'API_HLS_NG' does not equal 'HLS_NG'
```
</issue>
<code>
[start of src/streamlink/plugins/artetv.py]
1 """
2 $description European public service channel promoting culture, including magazine shows, concerts and documentaries.
3 $url arte.tv
4 $type live, vod
5 $metadata title
6 """
7
8 import logging
9 import re
10 from operator import itemgetter
11
12 from streamlink.plugin import Plugin, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream.hls import HLSStream
15
16
17 log = logging.getLogger(__name__)
18
19
20 @pluginmatcher(re.compile(r"""
21 https?://(?:\w+\.)?arte\.tv/(?:guide/)?
22 (?P<language>[a-z]{2})/
23 (?:
24 (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+
25 |
26 (?:direct|live)
27 )
28 """, re.VERBOSE))
29 class ArteTV(Plugin):
30 API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
31 API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"
32
33 def _get_streams(self):
34 language = self.match.group("language")
35 video_id = self.match.group("video_id")
36
37 json_url = self.API_URL.format(language, video_id or "LIVE")
38 headers = {
39 "Authorization": f"Bearer {self.API_TOKEN}",
40 }
41 streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(
42 validate.parse_json(),
43 {"data": {"attributes": {
44 "streams": validate.any(
45 [],
46 [
47 validate.all(
48 {
49 "url": validate.url(),
50 "slot": int,
51 "protocol": validate.any("HLS", "HLS_NG"),
52 },
53 validate.union_get("slot", "protocol", "url"),
54 ),
55 ],
56 ),
57 "metadata": {
58 "title": str,
59 "subtitle": validate.any(None, str),
60 },
61 }}},
62 validate.get(("data", "attributes")),
63 validate.union_get("streams", "metadata"),
64 ))
65
66 if not streams:
67 return
68
69 self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]
70
71 for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
72 return HLSStream.parse_variant_playlist(self.session, url)
73
74
75 __plugin__ = ArteTV
76
[end of src/streamlink/plugins/artetv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py
--- a/src/streamlink/plugins/artetv.py
+++ b/src/streamlink/plugins/artetv.py
@@ -2,6 +2,7 @@
$description European public service channel promoting culture, including magazine shows, concerts and documentaries.
$url arte.tv
$type live, vod
+$metadata id
$metadata title
"""
@@ -17,38 +18,41 @@
log = logging.getLogger(__name__)
-@pluginmatcher(re.compile(r"""
- https?://(?:\w+\.)?arte\.tv/(?:guide/)?
- (?P<language>[a-z]{2})/
- (?:
- (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+
- |
- (?:direct|live)
- )
-""", re.VERBOSE))
+@pluginmatcher(
+ name="live",
+ pattern=re.compile(
+ r"https?://(?:\w+\.)?arte\.tv/(?P<language>[a-z]{2})/(?:direct|live)/?",
+ ),
+)
+@pluginmatcher(
+ name="vod",
+ pattern=re.compile(
+ r"https?://(?:\w+\.)?arte\.tv/(?:guide/)?(?P<language>[a-z]{2})/(?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+",
+ ),
+)
class ArteTV(Plugin):
- API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
- API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"
+ API_URL = "https://api.arte.tv/api/player/v2/config/{language}/{id}"
def _get_streams(self):
- language = self.match.group("language")
- video_id = self.match.group("video_id")
+ self.id = self.match["video_id"] if self.matches["vod"] else "LIVE"
- json_url = self.API_URL.format(language, video_id or "LIVE")
- headers = {
- "Authorization": f"Bearer {self.API_TOKEN}",
- }
- streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(
+ json_url = self.API_URL.format(
+ language=self.match["language"],
+ id=self.id,
+ )
+ streams, metadata = self.session.http.get(json_url, schema=validate.Schema(
validate.parse_json(),
- {"data": {"attributes": {
+ {"data": {"attributes": dict}},
+ validate.get(("data", "attributes")),
+ {
"streams": validate.any(
[],
[
validate.all(
{
- "url": validate.url(),
"slot": int,
- "protocol": validate.any("HLS", "HLS_NG"),
+ "protocol": str,
+ "url": validate.url(),
},
validate.union_get("slot", "protocol", "url"),
),
@@ -58,17 +62,15 @@
"title": str,
"subtitle": validate.any(None, str),
},
- }}},
- validate.get(("data", "attributes")),
+ },
validate.union_get("streams", "metadata"),
))
- if not streams:
- return
-
self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]
- for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
+ for _slot, protocol, url in sorted(streams, key=itemgetter(0)):
+ if "HLS" not in protocol:
+ continue
return HLSStream.parse_variant_playlist(self.session, url)
| {"golden_diff": "diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py\n--- a/src/streamlink/plugins/artetv.py\n+++ b/src/streamlink/plugins/artetv.py\n@@ -2,6 +2,7 @@\n $description European public service channel promoting culture, including magazine shows, concerts and documentaries.\n $url arte.tv\n $type live, vod\n+$metadata id\n $metadata title\n \"\"\"\n \n@@ -17,38 +18,41 @@\n log = logging.getLogger(__name__)\n \n \n-@pluginmatcher(re.compile(r\"\"\"\n- https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?\n- (?P<language>[a-z]{2})/\n- (?:\n- (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\n- |\n- (?:direct|live)\n- )\n-\"\"\", re.VERBOSE))\n+@pluginmatcher(\n+ name=\"live\",\n+ pattern=re.compile(\n+ r\"https?://(?:\\w+\\.)?arte\\.tv/(?P<language>[a-z]{2})/(?:direct|live)/?\",\n+ ),\n+)\n+@pluginmatcher(\n+ name=\"vod\",\n+ pattern=re.compile(\n+ r\"https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?(?P<language>[a-z]{2})/(?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\",\n+ ),\n+)\n class ArteTV(Plugin):\n- API_URL = \"https://api.arte.tv/api/player/v2/config/{0}/{1}\"\n- API_TOKEN = \"MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ\"\n+ API_URL = \"https://api.arte.tv/api/player/v2/config/{language}/{id}\"\n \n def _get_streams(self):\n- language = self.match.group(\"language\")\n- video_id = self.match.group(\"video_id\")\n+ self.id = self.match[\"video_id\"] if self.matches[\"vod\"] else \"LIVE\"\n \n- json_url = self.API_URL.format(language, video_id or \"LIVE\")\n- headers = {\n- \"Authorization\": f\"Bearer {self.API_TOKEN}\",\n- }\n- streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(\n+ json_url = self.API_URL.format(\n+ language=self.match[\"language\"],\n+ id=self.id,\n+ )\n+ streams, metadata = self.session.http.get(json_url, schema=validate.Schema(\n validate.parse_json(),\n- {\"data\": {\"attributes\": {\n+ {\"data\": {\"attributes\": dict}},\n+ validate.get((\"data\", \"attributes\")),\n+ {\n \"streams\": validate.any(\n [],\n [\n validate.all(\n {\n- \"url\": validate.url(),\n \"slot\": int,\n- \"protocol\": validate.any(\"HLS\", \"HLS_NG\"),\n+ \"protocol\": str,\n+ \"url\": validate.url(),\n },\n validate.union_get(\"slot\", \"protocol\", \"url\"),\n ),\n@@ -58,17 +62,15 @@\n \"title\": str,\n \"subtitle\": validate.any(None, str),\n },\n- }}},\n- validate.get((\"data\", \"attributes\")),\n+ },\n validate.union_get(\"streams\", \"metadata\"),\n ))\n \n- if not streams:\n- return\n-\n self.title = f\"{metadata['title']} - {metadata['subtitle']}\" if metadata[\"subtitle\"] else metadata[\"title\"]\n \n- for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):\n+ for _slot, protocol, url in sorted(streams, key=itemgetter(0)):\n+ if \"HLS\" not in protocol:\n+ continue\n return HLSStream.parse_variant_playlist(self.session, url)\n", "issue": "plugins.artetv: error: Unable to validate response text: ValidationError(dict):\n### Checklist\r\n\r\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the 
master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\nstreamlink 6.5.0\r\n\r\n### Description\r\n\r\nI fix this issue \r\n```\r\nby adding '**API_HLS_NG**' in line 51 of file \r\n`/usr/lib/python3.11/site-packages/streamlink/plugins/artetv.py`\r\nlike this :\r\n```\r\n\r\nlink/streamlink/assets/19744191/b78f47ba-67b2-439b-b336-85bef7e4615a)\r\n\r\n### Debug log\r\n\r\n```text\r\nerror: Unable to validate response text: ValidationError(dict):\r\n Unable to validate value of key 'data'\r\n Context(dict):\r\n Unable to validate value of key 'attributes'\r\n Context(dict):\r\n Unable to validate value of key 'streams'\r\n Context(AnySchema):\r\n ValidationError(AnySchema):\r\n ValidationError(AnySchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'protocol'\r\n Context(AnySchema):\r\n ValidationError(equality):\r\n 'API_HLS_NG' does not equal 'HLS'\r\n ValidationError(equality):\r\n 'API_HLS_NG' does not equal 'HLS_NG'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description European public service channel promoting culture, including magazine shows, concerts and documentaries.\n$url arte.tv\n$type live, vod\n$metadata title\n\"\"\"\n\nimport logging\nimport re\nfrom operator import itemgetter\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?\n (?P<language>[a-z]{2})/\n (?:\n (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\n |\n (?:direct|live)\n )\n\"\"\", re.VERBOSE))\nclass ArteTV(Plugin):\n API_URL = \"https://api.arte.tv/api/player/v2/config/{0}/{1}\"\n API_TOKEN = \"MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ\"\n\n def _get_streams(self):\n language = self.match.group(\"language\")\n video_id = self.match.group(\"video_id\")\n\n json_url = self.API_URL.format(language, video_id or \"LIVE\")\n headers = {\n \"Authorization\": f\"Bearer {self.API_TOKEN}\",\n }\n streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(\n validate.parse_json(),\n {\"data\": {\"attributes\": {\n \"streams\": validate.any(\n [],\n [\n validate.all(\n {\n \"url\": validate.url(),\n \"slot\": int,\n \"protocol\": validate.any(\"HLS\", \"HLS_NG\"),\n },\n validate.union_get(\"slot\", \"protocol\", \"url\"),\n ),\n ],\n ),\n \"metadata\": {\n \"title\": str,\n \"subtitle\": validate.any(None, str),\n },\n }}},\n validate.get((\"data\", \"attributes\")),\n validate.union_get(\"streams\", \"metadata\"),\n ))\n\n if not streams:\n return\n\n self.title = f\"{metadata['title']} - {metadata['subtitle']}\" if metadata[\"subtitle\"] else metadata[\"title\"]\n\n for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):\n return HLSStream.parse_variant_playlist(self.session, url)\n\n\n__plugin__ = ArteTV\n", "path": "src/streamlink/plugins/artetv.py"}]} | 1,732 | 910 |
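On the streamlink entry above: rather than growing the whitelist one protocol name at a time (the workaround described in the issue), the accepted patch keeps any stream whose protocol string contains "HLS". A tiny, illustrative-only sketch of that acceptance rule:

```python
# Mirrors the `if "HLS" not in protocol: continue` check in the merged diff.
def is_hls(protocol: str) -> bool:
    return "HLS" in protocol

assert is_hls("HLS")
assert is_hls("HLS_NG")
assert is_hls("API_HLS_NG")   # the value that broke the old whitelist
assert not is_hls("DASH")
```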
gh_patches_debug_20703 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1817 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search: search doesn't appear to use the organization
Example: the MapAction org has two public datasets, but searching "mapaction" or MapAction returns 0 results.
Other org searches will return results, but this is probably because the name of the org is mentioned in other metadata.
To do:
1. confirm that search queries from the homepage or main search bar are not using organizations
2. if that is the source of the problem, add org to the search queries
</issue>
<code>
[start of ckanext-hdx_search/ckanext/hdx_search/plugin.py]
1 import logging
2 import ckan.plugins as plugins
3 import ckan.plugins.toolkit as tk
4 import ckan.lib.plugins as lib_plugins
5
6
7 class HDXSearchPlugin(plugins.SingletonPlugin):
8 plugins.implements(plugins.IConfigurer, inherit=False)
9 plugins.implements(plugins.IRoutes, inherit=True)
10 plugins.implements(plugins.ITemplateHelpers, inherit=False)
11 plugins.implements(plugins.IPackageController, inherit=True)
12
13 def update_config(self, config):
14 tk.add_template_directory(config, 'templates')
15
16 def get_helpers(self):
17 return {}
18
19 def before_map(self, map):
20 map.connect('search', '/search',
21 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
22 map.connect('simple_search',
23 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
24 return map
25
26 def after_map(self, map):
27 map.connect('search', '/search',
28 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
29 map.connect('simple_search',
30 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
31 return map
32
33 def before_search(self, search_params):
34 if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
35 search_params['facet.field'].append('vocab_Topics')
36
37 # If indicator flag is set, search only that type
38 if 'ext_indicator' in search_params['extras']:
39 if int(search_params['extras']['ext_indicator']) == 1:
40 search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'
41 elif int(search_params['extras']['ext_indicator']) == 0:
42 search_params['fq'] = search_params[
43 'fq'] + ' -extras_indicator:1'
44 return search_params
45
46 def after_search(self, search_results, search_params):
47 return search_results
48
49 def before_view(self, pkg_dict):
50 return pkg_dict
51
[end of ckanext-hdx_search/ckanext/hdx_search/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py
+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
@@ -1,8 +1,13 @@
-import logging
+import logging, re
import ckan.plugins as plugins
import ckan.plugins.toolkit as tk
import ckan.lib.plugins as lib_plugins
+def convert_country(q):
+ for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):
+ if re.findall(c['display_name'].lower(),q.lower()):
+ q += ' '+c['name']
+ return q
class HDXSearchPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer, inherit=False)
@@ -31,6 +36,7 @@
return map
def before_search(self, search_params):
+ search_params['q'] = convert_country(search_params['q'])
if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
search_params['facet.field'].append('vocab_Topics')
| {"golden_diff": "diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n@@ -1,8 +1,13 @@\n-import logging\n+import logging, re\n import ckan.plugins as plugins\n import ckan.plugins.toolkit as tk\n import ckan.lib.plugins as lib_plugins\n \n+def convert_country(q):\n+ for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n+ if re.findall(c['display_name'].lower(),q.lower()):\n+ q += ' '+c['name']\n+ return q\n \n class HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n@@ -31,6 +36,7 @@\n return map\n \n def before_search(self, search_params):\n+ search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n", "issue": "Search: search doesn't appear to use the organization\nExample: the MapAction org has two public datasets, but searching \"mapaction\" or MapAction returns 0 results. \n\nOther org searches will return results, but this is probably because the name of the org is mentioned in other metadata. \n\nTo do: \n1. confirm that search queries from the homepage or main search bar are not using organizations\n2. if that is the source of the problem, add org to the search queries\n\n", "before_files": [{"content": "import logging\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}]} | 1,205 | 288 |
gh_patches_debug_11786 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2877 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BigQuery] Allow more recent versions of google-api-core?
### Describe the feature
Currently dbt-bigquery has [an upper limit of 1.16 on `google-api-core`](https://github.com/fishtown-analytics/dbt/blob/v0.18.1b3/plugins/bigquery/setup.py#L53). That release is from Jan of this year.
Would it be possible to loosen that?
While it's clearly not dbt's responsibility for us to be able to install arbitrary packages, here's an example where we can't install `google-cloud-bigquery-datatransfer` because of this restriction:
```
[SolverProblemError]
Because no versions of google-cloud-bigquery-datatransfer match >2.0.0,<3.0.0
and google-cloud-bigquery-datatransfer (2.0.0) depends on google-api-core (>=1.22.2,<2.0.0dev), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) requires google-api-core (>=1.22.2,<2.0.0dev).
And because dbt-bigquery (0.18.0) depends on google-api-core (>=1.16.0,<1.17.0), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt-bigquery (0.18.0).
And because dbt (0.18.0) depends on dbt-bigquery (0.18.0)
and no versions of dbt match >0.18.0,<0.19.0, google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt (>=0.18.0,<0.19.0).
So, because {repo} depends on both dbt (^0.18.0) and google-cloud-bigquery-datatransfer (^2.0.0), version solving failed.
```
Thanks as ever for the awesome product!
</issue>
<code>
[start of plugins/bigquery/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 package_name = "dbt-bigquery"
23 package_version = "0.19.0b1"
24 description = """The bigquery adapter plugin for dbt (data build tool)"""
25
26 this_directory = os.path.abspath(os.path.dirname(__file__))
27 with open(os.path.join(this_directory, 'README.md')) as f:
28 long_description = f.read()
29
30 setup(
31 name=package_name,
32 version=package_version,
33 description=description,
34 long_description=long_description,
35 long_description_content_type='text/markdown',
36 author="Fishtown Analytics",
37 author_email="[email protected]",
38 url="https://github.com/fishtown-analytics/dbt",
39 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
40 package_data={
41 'dbt': [
42 'include/bigquery/dbt_project.yml',
43 'include/bigquery/sample_profiles.yml',
44 'include/bigquery/macros/*.sql',
45 'include/bigquery/macros/**/*.sql',
46 ]
47 },
48 install_requires=[
49 'dbt-core=={}'.format(package_version),
50 'protobuf>=3.6.0,<3.12',
51 'google-cloud-core>=1.3.0,<1.4',
52 'google-cloud-bigquery>=1.25.0,<1.26.0',
53 'google-api-core>=1.16.0,<1.17.0',
54 'googleapis-common-protos>=1.6.0,<1.7.0',
55 'six>=1.14.0',
56 ],
57 zip_safe=False,
58 classifiers=[
59 'Development Status :: 5 - Production/Stable',
60
61 'License :: OSI Approved :: Apache Software License',
62
63 'Operating System :: Microsoft :: Windows',
64 'Operating System :: MacOS :: MacOS X',
65 'Operating System :: POSIX :: Linux',
66
67 'Programming Language :: Python :: 3.6',
68 'Programming Language :: Python :: 3.7',
69 'Programming Language :: Python :: 3.8',
70 ],
71 python_requires=">=3.6.2",
72 )
73
[end of plugins/bigquery/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/bigquery/setup.py b/plugins/bigquery/setup.py
--- a/plugins/bigquery/setup.py
+++ b/plugins/bigquery/setup.py
@@ -47,11 +47,13 @@
},
install_requires=[
'dbt-core=={}'.format(package_version),
- 'protobuf>=3.6.0,<3.12',
- 'google-cloud-core>=1.3.0,<1.4',
- 'google-cloud-bigquery>=1.25.0,<1.26.0',
- 'google-api-core>=1.16.0,<1.17.0',
- 'googleapis-common-protos>=1.6.0,<1.7.0',
+ 'protobuf>=3.13.0,<4',
+ # These are more tightly pinned, as they have a track record of
+ # breaking changes in minor releases.
+ 'google-cloud-core>=1.3.0,<1.5',
+ 'google-cloud-bigquery>=1.25.0,<2.4',
+ 'google-api-core>=1.16.0,<1.24',
+ 'googleapis-common-protos>=1.6.0,<1.53',
'six>=1.14.0',
],
zip_safe=False,
| {"golden_diff": "diff --git a/plugins/bigquery/setup.py b/plugins/bigquery/setup.py\n--- a/plugins/bigquery/setup.py\n+++ b/plugins/bigquery/setup.py\n@@ -47,11 +47,13 @@\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n- 'protobuf>=3.6.0,<3.12',\n- 'google-cloud-core>=1.3.0,<1.4',\n- 'google-cloud-bigquery>=1.25.0,<1.26.0',\n- 'google-api-core>=1.16.0,<1.17.0',\n- 'googleapis-common-protos>=1.6.0,<1.7.0',\n+ 'protobuf>=3.13.0,<4',\n+ # These are more tightly pinned, as they have a track record of\n+ # breaking changes in minor releases.\n+ 'google-cloud-core>=1.3.0,<1.5',\n+ 'google-cloud-bigquery>=1.25.0,<2.4',\n+ 'google-api-core>=1.16.0,<1.24',\n+ 'googleapis-common-protos>=1.6.0,<1.53',\n 'six>=1.14.0',\n ],\n zip_safe=False,\n", "issue": "[BigQuery] Allow more recent versions of google-api-core?\n### Describe the feature\r\n\r\nCurrently dbt-bigquery has [an upper limit of 1.16 on `google-api-core`](https://github.com/fishtown-analytics/dbt/blob/v0.18.1b3/plugins/bigquery/setup.py#L53). That release is from Jan of this year.\r\n\r\nWould it be possible to loosen that?\r\n\r\nWhile it's clearly not dbt's responsibility for us to be able to install arbitrary packages, here's an example where we can't instally `google-cloud-bigquery-datatransfer` because of this restriction:\r\n\r\n```\r\n[SolverProblemError]\r\nBecause no versions of google-cloud-bigquery-datatransfer match >2.0.0,<3.0.0\r\n and google-cloud-bigquery-datatransfer (2.0.0) depends on google-api-core (>=1.22.2,<2.0.0dev), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) requires google-api-core (>=1.22.2,<2.0.0dev).\r\nAnd because dbt-bigquery (0.18.0) depends on google-api-core (>=1.16.0,<1.17.0), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt-bigquery (0.18.0).\r\nAnd because dbt (0.18.0) depends on dbt-bigquery (0.18.0)\r\n and no versions of dbt match >0.18.0,<0.19.0, google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt (>=0.18.0,<0.19.0).\r\nSo, because {repo} depends on both dbt (^0.18.0) and google-cloud-bigquery-datatransfer (^2.0.0), version solving failed.\r\n```\r\n\r\nThanks as ever for the awesome product!\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\npackage_name = \"dbt-bigquery\"\npackage_version = \"0.19.0b1\"\ndescription = \"\"\"The bigquery adapter plugin for dbt (data build tool)\"\"\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/bigquery/dbt_project.yml',\n 'include/bigquery/sample_profiles.yml',\n 
'include/bigquery/macros/*.sql',\n 'include/bigquery/macros/**/*.sql',\n ]\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n 'protobuf>=3.6.0,<3.12',\n 'google-cloud-core>=1.3.0,<1.4',\n 'google-cloud-bigquery>=1.25.0,<1.26.0',\n 'google-api-core>=1.16.0,<1.17.0',\n 'googleapis-common-protos>=1.6.0,<1.7.0',\n 'six>=1.14.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "plugins/bigquery/setup.py"}]} | 1,719 | 296 |
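On the dbt entry above: the solver error quoted in the issue is simply two version ranges with an empty intersection. A small worked example follows, assuming the third-party `packaging` library is installed; the candidate versions in the filter call are made up for illustration, and the second specifier is the normalized form of the pin quoted in the issue.

```python
from packaging.specifiers import SpecifierSet

dbt_pin = SpecifierSet(">=1.16.0,<1.17.0")            # dbt-bigquery 0.18.0
transfer_pin = SpecifierSet(">=1.22.2,<2.0.0.dev0")   # datatransfer 2.0.0

combined = dbt_pin & transfer_pin
# No candidate can satisfy both pins at once, so nothing passes the filter.
print(list(combined.filter(["1.16.0", "1.16.9", "1.22.2", "1.23.0"])))  # []
```

That empty intersection is why the accepted patch widens dbt's upper bounds instead of expecting the other package to loosen its floor.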
gh_patches_debug_10622 | rasdani/github-patches | git_diff | mdn__kuma-6143 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Require minimum length for "explanation" field in BCD signals
**Summary**
_What should be changed?_
A minimum length of 10 characters should be required for the "explanation" field in BCD signals
**Rationale**
_What problems would this solve?_
Less spam submissions
**Audience**
_Who would use this changed feature?_
BCD maintainers
**Proposal**
_What would users see and do? What would happen as a result?_
Users would be required to enter a meaningful explanation and hopefully refrain from submitting "fehfs", "test", and other garbage.
**Additional context**
_Is there anything else we should know?_
Was discussed in https://github.com/mdn/sprints/issues/2289
</issue>
<code>
[start of kuma/api/v1/serializers.py]
1 from rest_framework import exceptions
2 from rest_framework import serializers
3
4 from kuma.wiki.models import BCSignal, Document
5
6
7 class BCSignalSerializer(serializers.Serializer):
8 feature = serializers.CharField(max_length=255)
9 browsers = serializers.CharField(max_length=255)
10 slug = serializers.CharField(max_length=255)
11 locale = serializers.CharField(max_length=7)
12 explanation = serializers.CharField(allow_blank=True, max_length=1000)
13 supporting_material = serializers.CharField(
14 allow_blank=True, required=False, max_length=1000
15 )
16
17 def create(self, validated_data):
18 slug = validated_data.pop("slug")
19 locale = validated_data.pop("locale")
20 document = Document.objects.filter(slug=slug, locale=locale).first()
21
22 if document:
23 return BCSignal.objects.create(document=document, **validated_data)
24 raise exceptions.ValidationError("Document not found")
25
[end of kuma/api/v1/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/api/v1/serializers.py b/kuma/api/v1/serializers.py
--- a/kuma/api/v1/serializers.py
+++ b/kuma/api/v1/serializers.py
@@ -9,7 +9,11 @@
browsers = serializers.CharField(max_length=255)
slug = serializers.CharField(max_length=255)
locale = serializers.CharField(max_length=7)
- explanation = serializers.CharField(allow_blank=True, max_length=1000)
+ explanation = serializers.CharField(
+ # Make sure these match the constants in bcd-signal.jsx
+ max_length=1000,
+ min_length=10,
+ )
supporting_material = serializers.CharField(
allow_blank=True, required=False, max_length=1000
)
| {"golden_diff": "diff --git a/kuma/api/v1/serializers.py b/kuma/api/v1/serializers.py\n--- a/kuma/api/v1/serializers.py\n+++ b/kuma/api/v1/serializers.py\n@@ -9,7 +9,11 @@\n browsers = serializers.CharField(max_length=255)\n slug = serializers.CharField(max_length=255)\n locale = serializers.CharField(max_length=7)\n- explanation = serializers.CharField(allow_blank=True, max_length=1000)\n+ explanation = serializers.CharField(\n+ # Make sure these match the constants in bcd-signal.jsx\n+ max_length=1000,\n+ min_length=10,\n+ )\n supporting_material = serializers.CharField(\n allow_blank=True, required=False, max_length=1000\n )\n", "issue": "Require minium length for \"explanation\" field in BCD signals\n**Summary**\r\n_What should be changed?_\r\nA minimum length of 10 characters should be required for the \"explanation\" field in BCD signals\r\n\r\n**Rationale**\r\n_What problems would this solve?_\r\nLess spam submissions\r\n\r\n**Audience**\r\n_Who would use this changed feature?_\r\nBCD maintainers\r\n\r\n**Proposal**\r\n_What would users see and do? What would happen as a result?_\r\nUsers would be required to enter a meaningful explanation and hopefully refrain from submitting \"fehfs\", \"test\", and other garbage.\r\n\r\n**Additional context**\r\n_Is there anything else we should know?_\r\nWas discussed in https://github.com/mdn/sprints/issues/2289\n", "before_files": [{"content": "from rest_framework import exceptions\nfrom rest_framework import serializers\n\nfrom kuma.wiki.models import BCSignal, Document\n\n\nclass BCSignalSerializer(serializers.Serializer):\n feature = serializers.CharField(max_length=255)\n browsers = serializers.CharField(max_length=255)\n slug = serializers.CharField(max_length=255)\n locale = serializers.CharField(max_length=7)\n explanation = serializers.CharField(allow_blank=True, max_length=1000)\n supporting_material = serializers.CharField(\n allow_blank=True, required=False, max_length=1000\n )\n\n def create(self, validated_data):\n slug = validated_data.pop(\"slug\")\n locale = validated_data.pop(\"locale\")\n document = Document.objects.filter(slug=slug, locale=locale).first()\n\n if document:\n return BCSignal.objects.create(document=document, **validated_data)\n raise exceptions.ValidationError(\"Document not found\")\n", "path": "kuma/api/v1/serializers.py"}]} | 939 | 182 |
gh_patches_debug_7894 | rasdani/github-patches | git_diff | vega__altair-390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pin vega version in requirements
To make sure things still work when ipyvega is updated (as it already has been)
</issue>
<code>
[start of setup.py]
1 LONG_DESCRIPTION = """
2 Altair: A declarative statistical visualization library for Python.
3
4 http://altair-viz.github.io/
5
6 This package provides a Python API for building statistical visualizations
7 in a declarative manner. This API contains no actual visualization rendering
8 code, but instead emits JSON data structures following the `Vega-Lite`_
9 specification. For convenience, Altair can optionally use `ipyvega`_ to
10 seamlessly display client-side renderings in the Jupyter notebook.
11
12 .. image:: https://raw.githubusercontent.com/altair-viz/altair/master/images/cars.png
13
14 Please note that if you wish to use altair in the Jupyter Notebook, the
15 `ipyvega`_ notebook extension must be enabled as follows::
16
17 $ pip install altair
18 $ pip install --upgrade notebook
19 $ jupyter nbextension install --sys-prefix --py vega
20
21 See the `Altair Documentation`_ for tutorials, detailed installation
22 instructions, and examples.
23 See the `Altair Github Repository`_ for issues, bug reports, and contributions.
24
25 .. _Altair Github Repository: http://github.com/altair-viz/altair/
26 .. _Altair Documentation: http://altair-viz.github.io/
27 .. _Vega-Lite: https://github.com/vega/vega-lite
28 .. _ipyvega: https://github.com/vega/ipyvega
29 """
30
31 DESCRIPTION = "Altair: A declarative statistical visualization library for Python."
32 NAME = "altair"
33 PACKAGES = ['altair',
34 'altair.v1',
35 'altair.v1.tests',
36 'altair.v1.schema',
37 'altair.v1.schema._interface',
38 'altair.v1.schema._interface.tests',
39 'altair.v1.examples',
40 'altair.v1.examples.tests',
41 'altair.datasets',
42 'altair.datasets.tests',
43 'altair.expr',
44 'altair.expr.tests',
45 'altair.tests',
46 'altair.utils',
47 'altair.utils.tests',
48 ]
49 PACKAGE_DATA = {'altair': ['notebooks/*.ipynb',
50 'notebooks/*.html',
51 'notebooks/auto_examples/*.ipynb',
52 'v1/schema/*.json',
53 'v1/examples/*.json',
54 'v1/examples/json/*.json',
55 'datasets/*.json',
56 'expr/*.json']}
57 AUTHOR = "Brian E. Granger / Jake VanderPlas"
58 AUTHOR_EMAIL = "[email protected] / [email protected]"
59 URL = 'http://altair-viz.github.io'
60 DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'
61 LICENSE = 'BSD 3-clause'
62 INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']
63
64
65 import io
66 import os
67 import re
68
69 try:
70 from setuptools import setup
71 except ImportError:
72 from distutils.core import setup
73
74
75 def read(path, encoding='utf-8'):
76 path = os.path.join(os.path.dirname(__file__), path)
77 with io.open(path, encoding=encoding) as fp:
78 return fp.read()
79
80
81 def version(path):
82 """Obtain the packge version from a python file e.g. pkg/__init__.py
83
84 See <https://packaging.python.org/en/latest/single_source_version.html>.
85 """
86 version_file = read(path)
87 version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
88 version_file, re.M)
89 if version_match:
90 return version_match.group(1)
91 raise RuntimeError("Unable to find version string.")
92
93
94 VERSION = version('altair/__init__.py')
95
96
97 setup(name=NAME,
98 version=VERSION,
99 description=DESCRIPTION,
100 long_description=LONG_DESCRIPTION,
101 author=AUTHOR,
102 author_email=AUTHOR_EMAIL,
103 url=URL,
104 download_url=DOWNLOAD_URL,
105 license=LICENSE,
106 packages=PACKAGES,
107 package_data=PACKAGE_DATA,
108 install_requires=INSTALL_REQUIRES,
109 classifiers=[
110 'Development Status :: 4 - Beta',
111 'Environment :: Console',
112 'Intended Audience :: Science/Research',
113 'License :: OSI Approved :: BSD License',
114 'Natural Language :: English',
115 'Programming Language :: Python :: 2.7',
116 'Programming Language :: Python :: 3.4',
117 'Programming Language :: Python :: 3.5'],
118 )
119
[end of setup.py]
[start of altair/__init__.py]
1 __version__ = '1.3.0.dev0'
2
3 from .v1 import *
4
[end of altair/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/__init__.py b/altair/__init__.py
--- a/altair/__init__.py
+++ b/altair/__init__.py
@@ -1,3 +1,3 @@
-__version__ = '1.3.0.dev0'
+__version__ = '1.2.1.dev0'
from .v1 import *
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
URL = 'http://altair-viz.github.io'
DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'
LICENSE = 'BSD 3-clause'
-INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']
+INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega==0.4.4']
import io
| {"golden_diff": "diff --git a/altair/__init__.py b/altair/__init__.py\n--- a/altair/__init__.py\n+++ b/altair/__init__.py\n@@ -1,3 +1,3 @@\n-__version__ = '1.3.0.dev0'\n+__version__ = '1.2.1.dev0'\n \n from .v1 import *\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n URL = 'http://altair-viz.github.io'\n DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\n LICENSE = 'BSD 3-clause'\n-INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']\n+INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega==0.4.4']\n \n \n import io\n", "issue": "Pin vega version in requirements\nTo make sure things still work when ipyvega is updated (as it already has been)\n", "before_files": [{"content": "LONG_DESCRIPTION = \"\"\"\nAltair: A declarative statistical visualization library for Python.\n\nhttp://altair-viz.github.io/\n\nThis package provides a Python API for building statistical visualizations\nin a declarative manner. This API contains no actual visualization rendering\ncode, but instead emits JSON data structures following the `Vega-Lite`_\nspecification. For convenience, Altair can optionally use `ipyvega`_ to\nseamlessly display client-side renderings in the Jupyter notebook.\n\n.. image:: https://raw.githubusercontent.com/altair-viz/altair/master/images/cars.png\n\nPlease note that if you wish to use altair in the Jupyter Notebook, the\n`ipyvega`_ notebook extension must be enabled as follows::\n\n $ pip install altair\n $ pip install --upgrade notebook\n $ jupyter nbextension install --sys-prefix --py vega\n\nSee the `Altair Documentation`_ for tutorials, detailed installation\ninstructions, and examples.\nSee the `Altair Github Repository`_ for issues, bug reports, and contributions.\n\n.. _Altair Github Repository: http://github.com/altair-viz/altair/\n.. _Altair Documentation: http://altair-viz.github.io/\n.. _Vega-Lite: https://github.com/vega/vega-lite\n.. _ipyvega: https://github.com/vega/ipyvega\n\"\"\"\n\nDESCRIPTION = \"Altair: A declarative statistical visualization library for Python.\"\nNAME = \"altair\"\nPACKAGES = ['altair',\n 'altair.v1',\n 'altair.v1.tests',\n 'altair.v1.schema',\n 'altair.v1.schema._interface',\n 'altair.v1.schema._interface.tests',\n 'altair.v1.examples',\n 'altair.v1.examples.tests',\n 'altair.datasets',\n 'altair.datasets.tests',\n 'altair.expr',\n 'altair.expr.tests',\n 'altair.tests',\n 'altair.utils',\n 'altair.utils.tests',\n ]\nPACKAGE_DATA = {'altair': ['notebooks/*.ipynb',\n 'notebooks/*.html',\n 'notebooks/auto_examples/*.ipynb',\n 'v1/schema/*.json',\n 'v1/examples/*.json',\n 'v1/examples/json/*.json',\n 'datasets/*.json',\n 'expr/*.json']}\nAUTHOR = \"Brian E. Granger / Jake VanderPlas\"\nAUTHOR_EMAIL = \"[email protected] / [email protected]\"\nURL = 'http://altair-viz.github.io'\nDOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\nLICENSE = 'BSD 3-clause'\nINSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']\n\n\nimport io\nimport os\nimport re\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\ndef read(path, encoding='utf-8'):\n path = os.path.join(os.path.dirname(__file__), path)\n with io.open(path, encoding=encoding) as fp:\n return fp.read()\n\n\ndef version(path):\n \"\"\"Obtain the packge version from a python file e.g. 
pkg/__init__.py\n\n See <https://packaging.python.org/en/latest/single_source_version.html>.\n \"\"\"\n version_file = read(path)\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nVERSION = version('altair/__init__.py')\n\n\nsetup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n packages=PACKAGES,\n package_data=PACKAGE_DATA,\n install_requires=INSTALL_REQUIRES,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'],\n )\n", "path": "setup.py"}, {"content": "__version__ = '1.3.0.dev0'\n\nfrom .v1 import *\n", "path": "altair/__init__.py"}]} | 1,820 | 227 |
gh_patches_debug_14864 | rasdani/github-patches | git_diff | benoitc__gunicorn-1931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Must explicitly define `setuptools` as a dependency
When running gunicorn in a hardened Python docker image (with most of the dependencies removed) `setuptools` might be missing.
For instance:
```
Traceback (most recent call last):
File "/app/manage-docker.binary.runfiles/__main__/server.py", line 1, in <module>
from gunicorn.app.base import BaseApplication
File "/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/app/base.py", line 12, in <module>
from gunicorn import util
File "/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/util.py", line 12, in <module>
import pkg_resources
ImportError: No module named pkg_resources
```
This can be fixed by defining `setuptools` as a direct dependency within the project's `requirements.txt` file; however, it could be fixed at the gunicorn codebase level by using `install_requires = ['setuptools']` in setup.py.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import sys
8
9 from setuptools import setup, find_packages
10 from setuptools.command.test import test as TestCommand
11
12 from gunicorn import __version__
13
14
15 CLASSIFIERS = [
16 'Development Status :: 4 - Beta',
17 'Environment :: Other Environment',
18 'Intended Audience :: Developers',
19 'License :: OSI Approved :: MIT License',
20 'Operating System :: MacOS :: MacOS X',
21 'Operating System :: POSIX',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.4',
25 'Programming Language :: Python :: 3.5',
26 'Programming Language :: Python :: 3.6',
27 'Programming Language :: Python :: 3.7',
28 'Programming Language :: Python :: 3 :: Only',
29 'Topic :: Internet',
30 'Topic :: Utilities',
31 'Topic :: Software Development :: Libraries :: Python Modules',
32 'Topic :: Internet :: WWW/HTTP',
33 'Topic :: Internet :: WWW/HTTP :: WSGI',
34 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
35 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
36
37 # read long description
38 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
39 long_description = f.read()
40
41 # read dev requirements
42 fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')
43 with open(fname) as f:
44 tests_require = [l.strip() for l in f.readlines()]
45
46 class PyTestCommand(TestCommand):
47 user_options = [
48 ("cov", None, "measure coverage")
49 ]
50
51 def initialize_options(self):
52 TestCommand.initialize_options(self)
53 self.cov = None
54
55 def finalize_options(self):
56 TestCommand.finalize_options(self)
57 self.test_args = ['tests']
58 if self.cov:
59 self.test_args += ['--cov', 'gunicorn']
60 self.test_suite = True
61
62 def run_tests(self):
63 import pytest
64 errno = pytest.main(self.test_args)
65 sys.exit(errno)
66
67
68 extra_require = {
69 'gevent': ['gevent>=0.13'],
70 'eventlet': ['eventlet>=0.9.7'],
71 'tornado': ['tornado>=0.2'],
72 'gthread': [],
73 }
74
75 setup(
76 name='gunicorn',
77 version=__version__,
78
79 description='WSGI HTTP Server for UNIX',
80 long_description=long_description,
81 author='Benoit Chesneau',
82 author_email='[email protected]',
83 license='MIT',
84 url='http://gunicorn.org',
85
86 python_requires='>=3.4',
87 classifiers=CLASSIFIERS,
88 zip_safe=False,
89 packages=find_packages(exclude=['examples', 'tests']),
90 include_package_data=True,
91
92 tests_require=tests_require,
93 cmdclass={'test': PyTestCommand},
94
95 entry_points="""
96 [console_scripts]
97 gunicorn=gunicorn.app.wsgiapp:run
98 gunicorn_paster=gunicorn.app.pasterapp:run
99
100 [paste.server_runner]
101 main=gunicorn.app.pasterapp:paste_server
102 """,
103 extras_require=extra_require,
104 )
105
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,6 +65,14 @@
sys.exit(errno)
+install_requires = [
+ # We depend on functioning pkg_resources.working_set.add_entry() and
+ # pkg_resources.load_entry_point(). These both work as of 3.0 which
+ # is the first version to support Python 3.4 which we require as a
+ # floor.
+ 'setuptools>=3.0',
+]
+
extra_require = {
'gevent': ['gevent>=0.13'],
'eventlet': ['eventlet>=0.9.7'],
@@ -84,6 +92,7 @@
url='http://gunicorn.org',
python_requires='>=3.4',
+ install_requires=install_requires,
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(exclude=['examples', 'tests']),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,6 +65,14 @@\n sys.exit(errno)\n \n \n+install_requires = [\n+ # We depend on functioning pkg_resources.working_set.add_entry() and\n+ # pkg_resources.load_entry_point(). These both work as of 3.0 which\n+ # is the first version to support Python 3.4 which we require as a\n+ # floor.\n+ 'setuptools>=3.0',\n+]\n+\n extra_require = {\n 'gevent': ['gevent>=0.13'],\n 'eventlet': ['eventlet>=0.9.7'],\n@@ -84,6 +92,7 @@\n url='http://gunicorn.org',\n \n python_requires='>=3.4',\n+ install_requires=install_requires,\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n", "issue": "Must explicitly define `setuptools` as a dependency\nWhen running gunicorn in a hardened Python docker image (with most of the dependencies removed) `setuptools` might be missing.\r\n\r\nFor instance:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/app/manage-docker.binary.runfiles/__main__/server.py\", line 1, in <module>\r\n from gunicorn.app.base import BaseApplication\r\n File \"/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/app/base.py\", line 12, in <module>\r\n from gunicorn import util\r\n File \"/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/util.py\", line 12, in <module>\r\n import pkg_resources\r\nImportError: No module named pkg_resources\r\n```\r\n\r\nCan be fixed by defining `setuptools` as a direct dependency within the project' `requirements.txt` file, however, it could be fix at the gunicorn codebase level by using `install_requires = ['setuptools']` in setup.py. \n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import 
pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nextra_require = {\n 'gevent': ['gevent>=0.13'],\n 'eventlet': ['eventlet>=0.9.7'],\n 'tornado': ['tornado>=0.2'],\n 'gthread': [],\n}\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='http://gunicorn.org',\n\n python_requires='>=3.4',\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n gunicorn_paster=gunicorn.app.pasterapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:paste_server\n \"\"\",\n extras_require=extra_require,\n)\n", "path": "setup.py"}]} | 1,691 | 217 |
gh_patches_debug_4178 | rasdani/github-patches | git_diff | learningequality__kolibri-12049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'On my own' device - Merging a user is not working
## Observed behavior
Observed while integration testing the [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1) release.
When I try to merge a user created through 'On my own' I am getting an "Invalid URL" error in the console. Note that creating a new account through the same flow is working correctly. This issue is caused by the changes made in https://github.com/learningequality/kolibri/pull/12028 and is not extant in [v0.16.1-beta0](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta0).
https://github.com/learningequality/kolibri/assets/79847249/30daa3ca-918c-4c15-901b-c74c08b96466
## Expected behavior
Fully functional 'Merge accounts' user flow.
## Steps to reproduce the issue
1. Install [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1).
2. Setup a full device as a server and another device by going through the 'On my own' setup flow.
3. Attempt to merge the user from the 'On my own' device' to the server facility.
## Logs
[logs.zip](https://github.com/learningequality/kolibri/files/14850735/logs.zip)
## Usage Details
[v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1)
Windows 11, Ubuntu 22 - Chrome
</issue>
<code>
[start of kolibri/plugins/user_profile/viewsets.py]
1 import requests
2 from django.contrib.auth import login
3 from django.core.exceptions import ValidationError as DjangoValidationError
4 from rest_framework.exceptions import ValidationError
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8 from .utils import TokenGenerator
9 from kolibri.core.auth.models import FacilityUser
10 from kolibri.core.utils.urls import reverse_remote
11 from kolibri.utils.urls import validator
12
13
14 class OnMyOwnSetupViewset(APIView):
15 """
16 Viewset to determine if the facility has been setup as an "On my own setup" facility.
17 """
18
19 def get(self, request, format=None):
20 if request.user.is_anonymous:
21 self.permission_denied(request)
22 user_facility = self.request.user.facility
23 return Response(
24 {
25 "on_my_own_setup": user_facility.on_my_own_setup,
26 }
27 )
28
29
30 class RemoteFacilityUserViewset(APIView):
31 def get(self, request):
32 baseurl = request.query_params.get("baseurl", "")
33 try:
34 validator(baseurl)
35 except DjangoValidationError as e:
36 raise ValidationError(detail=str(e))
37 username = request.query_params.get("username", None)
38 facility = request.query_params.get("facility", None)
39 if username is None or facility is None:
40 raise ValidationError(detail="Both username and facility are required")
41 url = reverse_remote(baseurl, "kolibri:core:publicsearchuser-list")
42 try:
43 response = requests.get(
44 url, params={"facility": facility, "search": username}
45 )
46 if response.status_code == 200:
47 return Response(response.json())
48 else:
49 return Response({})
50 except Exception as e:
51 raise ValidationError(detail=str(e))
52
53
54 class RemoteFacilityUserAuthenticatedViewset(APIView):
55 def post(self, request, *args, **kwargs):
56 baseurl = request.query_params.get("baseurl", "")
57 try:
58 validator(baseurl)
59 except DjangoValidationError as e:
60 raise ValidationError(detail=str(e))
61 username = request.data.get("username", None)
62 facility = request.data.get("facility", None)
63 password = request.data.get("password", None)
64 if username is None or facility is None:
65 raise ValidationError(detail="Both username and facility are required")
66 url = reverse_remote(baseurl, "kolibri:core:publicuser-list")
67 params = {"facility": facility, "search": username}
68
69 # adding facility so auth works when learners can login without password:
70 username = "username={}&facility={}".format(username, facility)
71
72 auth = requests.auth.HTTPBasicAuth(username, password)
73 try:
74 response = requests.get(url, params=params, verify=False, auth=auth)
75 if response.status_code == 200:
76 return Response(response.json())
77 else:
78 return Response({"error": response.json()["detail"]})
79 except Exception as e:
80 raise ValidationError(detail=str(e))
81
82
83 class LoginMergedUserViewset(APIView):
84 """
85 Viewset to login into kolibri using the merged user,
86 after the old user has been deleted
87 """
88
89 def post(self, request):
90 pk = request.data.get("pk", None)
91 token = request.data.get("token", None)
92 new_user = FacilityUser.objects.get(pk=pk)
93 if not TokenGenerator().check_token(new_user, token):
94 return Response({"error": "Unauthorized"}, status=401)
95 login(request, new_user)
96 return Response({"success": True})
97
[end of kolibri/plugins/user_profile/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/plugins/user_profile/viewsets.py b/kolibri/plugins/user_profile/viewsets.py
--- a/kolibri/plugins/user_profile/viewsets.py
+++ b/kolibri/plugins/user_profile/viewsets.py
@@ -53,7 +53,7 @@
class RemoteFacilityUserAuthenticatedViewset(APIView):
def post(self, request, *args, **kwargs):
- baseurl = request.query_params.get("baseurl", "")
+ baseurl = request.data.get("baseurl", "")
try:
validator(baseurl)
except DjangoValidationError as e:
| {"golden_diff": "diff --git a/kolibri/plugins/user_profile/viewsets.py b/kolibri/plugins/user_profile/viewsets.py\n--- a/kolibri/plugins/user_profile/viewsets.py\n+++ b/kolibri/plugins/user_profile/viewsets.py\n@@ -53,7 +53,7 @@\n \n class RemoteFacilityUserAuthenticatedViewset(APIView):\n def post(self, request, *args, **kwargs):\n- baseurl = request.query_params.get(\"baseurl\", \"\")\n+ baseurl = request.data.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n", "issue": "'On my own' device - Merging a user is not working\n## Observed behavior\r\nObserved while integration testing the [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1) release.\r\nWhen I try to merge a user created through 'On my own' I am getting an \"Invalid URL\" error in the console. Note that creating a new account through the same flow is working correctly. This issue is caused by the changes made in https://github.com/learningequality/kolibri/pull/12028 and is not extant in [v0.16.1-beta0](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta0).\r\n\r\nhttps://github.com/learningequality/kolibri/assets/79847249/30daa3ca-918c-4c15-901b-c74c08b96466\r\n\r\n## Expected behavior\r\n\r\nFully functional 'Merge accounts' user flow. \r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Install [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1).\r\n2. Setup a full device as a server and another device by going through the 'On my own' setup flow.\r\n3. Attempt to merge the user from the 'On my own' device' to the server facility.\r\n\r\n## Logs\r\n\r\n[logs.zip](https://github.com/learningequality/kolibri/files/14850735/logs.zip)\r\n\r\n## Usage Details\r\n[v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1)\r\nWindows 11, Ubuntu 22 - Chrome\n", "before_files": [{"content": "import requests\nfrom django.contrib.auth import login\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .utils import TokenGenerator\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.utils.urls import reverse_remote\nfrom kolibri.utils.urls import validator\n\n\nclass OnMyOwnSetupViewset(APIView):\n \"\"\"\n Viewset to determine if the facility has been setup as an \"On my own setup\" facility.\n \"\"\"\n\n def get(self, request, format=None):\n if request.user.is_anonymous:\n self.permission_denied(request)\n user_facility = self.request.user.facility\n return Response(\n {\n \"on_my_own_setup\": user_facility.on_my_own_setup,\n }\n )\n\n\nclass RemoteFacilityUserViewset(APIView):\n def get(self, request):\n baseurl = request.query_params.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n username = request.query_params.get(\"username\", None)\n facility = request.query_params.get(\"facility\", None)\n if username is None or facility is None:\n raise ValidationError(detail=\"Both username and facility are required\")\n url = reverse_remote(baseurl, \"kolibri:core:publicsearchuser-list\")\n try:\n response = requests.get(\n url, params={\"facility\": facility, \"search\": username}\n )\n if response.status_code == 200:\n return Response(response.json())\n else:\n return Response({})\n except Exception as e:\n raise 
ValidationError(detail=str(e))\n\n\nclass RemoteFacilityUserAuthenticatedViewset(APIView):\n def post(self, request, *args, **kwargs):\n baseurl = request.query_params.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n username = request.data.get(\"username\", None)\n facility = request.data.get(\"facility\", None)\n password = request.data.get(\"password\", None)\n if username is None or facility is None:\n raise ValidationError(detail=\"Both username and facility are required\")\n url = reverse_remote(baseurl, \"kolibri:core:publicuser-list\")\n params = {\"facility\": facility, \"search\": username}\n\n # adding facility so auth works when learners can login without password:\n username = \"username={}&facility={}\".format(username, facility)\n\n auth = requests.auth.HTTPBasicAuth(username, password)\n try:\n response = requests.get(url, params=params, verify=False, auth=auth)\n if response.status_code == 200:\n return Response(response.json())\n else:\n return Response({\"error\": response.json()[\"detail\"]})\n except Exception as e:\n raise ValidationError(detail=str(e))\n\n\nclass LoginMergedUserViewset(APIView):\n \"\"\"\n Viewset to login into kolibri using the merged user,\n after the old user has been deleted\n \"\"\"\n\n def post(self, request):\n pk = request.data.get(\"pk\", None)\n token = request.data.get(\"token\", None)\n new_user = FacilityUser.objects.get(pk=pk)\n if not TokenGenerator().check_token(new_user, token):\n return Response({\"error\": \"Unauthorized\"}, status=401)\n login(request, new_user)\n return Response({\"success\": True})\n", "path": "kolibri/plugins/user_profile/viewsets.py"}]} | 1,870 | 128 |
gh_patches_debug_1219 | rasdani/github-patches | git_diff | pulp__pulpcore-4641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pulp_file version is set to 3.40.0.dev
**Version**
pulpcore 3.40.0
**Describe the bug**
Status API reports pulp_file version as 3.40.0.dev
</issue>
<code>
[start of pulp_file/app/__init__.py]
1 from pulpcore.plugin import PulpPluginAppConfig
2
3
4 class PulpFilePluginAppConfig(PulpPluginAppConfig):
5 """
6 Entry point for pulp_file plugin.
7 """
8
9 name = "pulp_file.app"
10 label = "file"
11 version = "3.40.0.dev"
12 python_package_name = "pulp_file" # TODO Add python_module_name
13 domain_compatible = True
14
[end of pulp_file/app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py
--- a/pulp_file/app/__init__.py
+++ b/pulp_file/app/__init__.py
@@ -8,6 +8,6 @@
name = "pulp_file.app"
label = "file"
- version = "3.40.0.dev"
+ version = "3.41.0.dev"
python_package_name = "pulp_file" # TODO Add python_module_name
domain_compatible = True
| {"golden_diff": "diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py\n--- a/pulp_file/app/__init__.py\n+++ b/pulp_file/app/__init__.py\n@@ -8,6 +8,6 @@\n \n name = \"pulp_file.app\"\n label = \"file\"\n- version = \"3.40.0.dev\"\n+ version = \"3.41.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n", "issue": "pulp_file version is set to 3.40.0.dev \n**Version**\r\npulpcore 3.40.0\r\n\r\n**Describe the bug**\r\nStatus API reports pulp_file version as 3.40.0.dev\n", "before_files": [{"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.40.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n", "path": "pulp_file/app/__init__.py"}]} | 701 | 122 |
gh_patches_debug_3577 | rasdani/github-patches | git_diff | python__mypy-2596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Tuple[()]` is occasionally converted to `Tuple[Any, ...]`
Most obvious when the `Tuple[()]` is passed through a Callable
```
from typing import *
Type = Callable[[Tuple[()]], int]
x = "foo" # type: Type
```
Results in:
```
Incompatible types in assignment (expression has type "str", variable has type Callable[[Tuple[Any, ...]], int])
```
As a side note,
```Type = Tuple[()]```
also appears to give a weird error.
</issue>
<code>
[start of mypy/exprtotype.py]
1 """Translate an Expression to a Type value."""
2
3 from mypy.nodes import (
4 Expression, NameExpr, MemberExpr, IndexExpr, TupleExpr,
5 ListExpr, StrExpr, BytesExpr, UnicodeExpr, EllipsisExpr
6 )
7 from mypy.parsetype import parse_str_as_type, TypeParseError
8 from mypy.types import Type, UnboundType, TypeList, EllipsisType
9
10
11 class TypeTranslationError(Exception):
12 """Exception raised when an expression is not valid as a type."""
13
14
15 def expr_to_unanalyzed_type(expr: Expression) -> Type:
16 """Translate an expression to the corresponding type.
17
18 The result is not semantically analyzed. It can be UnboundType or TypeList.
19 Raise TypeTranslationError if the expression cannot represent a type.
20 """
21 if isinstance(expr, NameExpr):
22 name = expr.name
23 return UnboundType(name, line=expr.line, column=expr.column)
24 elif isinstance(expr, MemberExpr):
25 fullname = get_member_expr_fullname(expr)
26 if fullname:
27 return UnboundType(fullname, line=expr.line, column=expr.column)
28 else:
29 raise TypeTranslationError()
30 elif isinstance(expr, IndexExpr):
31 base = expr_to_unanalyzed_type(expr.base)
32 if isinstance(base, UnboundType):
33 if base.args:
34 raise TypeTranslationError()
35 if isinstance(expr.index, TupleExpr):
36 args = expr.index.items
37 else:
38 args = [expr.index]
39 base.args = [expr_to_unanalyzed_type(arg) for arg in args]
40 return base
41 else:
42 raise TypeTranslationError()
43 elif isinstance(expr, ListExpr):
44 return TypeList([expr_to_unanalyzed_type(t) for t in expr.items],
45 line=expr.line, column=expr.column)
46 elif isinstance(expr, (StrExpr, BytesExpr, UnicodeExpr)):
47 # Parse string literal type.
48 try:
49 result = parse_str_as_type(expr.value, expr.line)
50 except TypeParseError:
51 raise TypeTranslationError()
52 return result
53 elif isinstance(expr, EllipsisExpr):
54 return EllipsisType(expr.line)
55 else:
56 raise TypeTranslationError()
57
58
59 def get_member_expr_fullname(expr: MemberExpr) -> str:
60 """Return the qualified name representation of a member expression.
61
62 Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
63 argument cannot be represented in this form.
64 """
65 if isinstance(expr.expr, NameExpr):
66 initial = expr.expr.name
67 elif isinstance(expr.expr, MemberExpr):
68 initial = get_member_expr_fullname(expr.expr)
69 else:
70 return None
71 return '{}.{}'.format(initial, expr.name)
72
[end of mypy/exprtotype.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py
--- a/mypy/exprtotype.py
+++ b/mypy/exprtotype.py
@@ -37,6 +37,8 @@
else:
args = [expr.index]
base.args = [expr_to_unanalyzed_type(arg) for arg in args]
+ if not base.args:
+ base.empty_tuple_index = True
return base
else:
raise TypeTranslationError()
| {"golden_diff": "diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py\n--- a/mypy/exprtotype.py\n+++ b/mypy/exprtotype.py\n@@ -37,6 +37,8 @@\n else:\n args = [expr.index]\n base.args = [expr_to_unanalyzed_type(arg) for arg in args]\n+ if not base.args:\n+ base.empty_tuple_index = True\n return base\n else:\n raise TypeTranslationError()\n", "issue": "`Tuple[()]` is occasionally converted to `Tuple[Any, ...]`\nMost obvious when the `Tuple[()]` is passed through a Callable\r\n```\r\nfrom typing import *\r\n\r\nType = Callable[[Tuple[()]], int]\r\nx = \"foo\" # type: Type\r\n```\r\nResults in:\r\n```\r\nIncompatible types in assignment (expression has type \"str\", variable has type Callable[[Tuple[Any, ...]], int])\r\n```\r\n\r\nAs a side note,\r\n```Type = Tuple[()]```\r\nAlso appears to give a weird error.\n", "before_files": [{"content": "\"\"\"Translate an Expression to a Type value.\"\"\"\n\nfrom mypy.nodes import (\n Expression, NameExpr, MemberExpr, IndexExpr, TupleExpr,\n ListExpr, StrExpr, BytesExpr, UnicodeExpr, EllipsisExpr\n)\nfrom mypy.parsetype import parse_str_as_type, TypeParseError\nfrom mypy.types import Type, UnboundType, TypeList, EllipsisType\n\n\nclass TypeTranslationError(Exception):\n \"\"\"Exception raised when an expression is not valid as a type.\"\"\"\n\n\ndef expr_to_unanalyzed_type(expr: Expression) -> Type:\n \"\"\"Translate an expression to the corresponding type.\n\n The result is not semantically analyzed. It can be UnboundType or TypeList.\n Raise TypeTranslationError if the expression cannot represent a type.\n \"\"\"\n if isinstance(expr, NameExpr):\n name = expr.name\n return UnboundType(name, line=expr.line, column=expr.column)\n elif isinstance(expr, MemberExpr):\n fullname = get_member_expr_fullname(expr)\n if fullname:\n return UnboundType(fullname, line=expr.line, column=expr.column)\n else:\n raise TypeTranslationError()\n elif isinstance(expr, IndexExpr):\n base = expr_to_unanalyzed_type(expr.base)\n if isinstance(base, UnboundType):\n if base.args:\n raise TypeTranslationError()\n if isinstance(expr.index, TupleExpr):\n args = expr.index.items\n else:\n args = [expr.index]\n base.args = [expr_to_unanalyzed_type(arg) for arg in args]\n return base\n else:\n raise TypeTranslationError()\n elif isinstance(expr, ListExpr):\n return TypeList([expr_to_unanalyzed_type(t) for t in expr.items],\n line=expr.line, column=expr.column)\n elif isinstance(expr, (StrExpr, BytesExpr, UnicodeExpr)):\n # Parse string literal type.\n try:\n result = parse_str_as_type(expr.value, expr.line)\n except TypeParseError:\n raise TypeTranslationError()\n return result\n elif isinstance(expr, EllipsisExpr):\n return EllipsisType(expr.line)\n else:\n raise TypeTranslationError()\n\n\ndef get_member_expr_fullname(expr: MemberExpr) -> str:\n \"\"\"Return the qualified name representation of a member expression.\n\n Return a string of form foo.bar, foo.bar.baz, or similar, or None if the\n argument cannot be represented in this form.\n \"\"\"\n if isinstance(expr.expr, NameExpr):\n initial = expr.expr.name\n elif isinstance(expr.expr, MemberExpr):\n initial = get_member_expr_fullname(expr.expr)\n else:\n return None\n return '{}.{}'.format(initial, expr.name)\n", "path": "mypy/exprtotype.py"}]} | 1,364 | 110 |
gh_patches_debug_56668 | rasdani/github-patches | git_diff | magenta__magenta-841 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
execfile() was removed from Python 3
https://github.com/tensorflow/magenta/blob/master/magenta/tools/pip/setup.py#L23
</issue>
<code>
[start of magenta/tools/pip/setup.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """A setuptools based setup module for magenta."""
15
16 from setuptools import find_packages
17 from setuptools import setup
18
19 # Bit of a hack to parse the version string stored in version.py without
20 # executing __init__.py, which will end up requiring a bunch of dependencies to
21 # execute (e.g., tensorflow, pretty_midi, etc.).
22 # Makes the __version__ variable available.
23 execfile('magenta/version.py')
24
25
26 REQUIRED_PACKAGES = [
27 'IPython',
28 'Pillow >= 3.4.2',
29 'bokeh >= 0.12.0',
30 'futures',
31 'intervaltree >= 2.1.0',
32 'matplotlib >= 1.5.3',
33 'mido == 1.2.6',
34 'numpy >= 1.11.0',
35 'pandas >= 0.18.1',
36 'pretty_midi >= 0.2.6',
37 'python-rtmidi',
38 'scipy >= 0.18.1',
39 'tensorflow >= 1.1.0',
40 'wheel',
41 ]
42
43 CONSOLE_SCRIPTS = [
44 'magenta.interfaces.midi.magenta_midi',
45 'magenta.interfaces.midi.midi_clock',
46 'magenta.models.drums_rnn.drums_rnn_create_dataset',
47 'magenta.models.drums_rnn.drums_rnn_generate',
48 'magenta.models.drums_rnn.drums_rnn_train',
49 'magenta.models.image_stylization.image_stylization_create_dataset',
50 'magenta.models.image_stylization.image_stylization_evaluate',
51 'magenta.models.image_stylization.image_stylization_finetune',
52 'magenta.models.image_stylization.image_stylization_train',
53 'magenta.models.image_stylization.image_stylization_transform',
54 'magenta.models.improv_rnn.improv_rnn_create_dataset',
55 'magenta.models.improv_rnn.improv_rnn_generate',
56 'magenta.models.improv_rnn.improv_rnn_train',
57 'magenta.models.melody_rnn.melody_rnn_create_dataset',
58 'magenta.models.melody_rnn.melody_rnn_generate',
59 'magenta.models.melody_rnn.melody_rnn_train',
60 'magenta.models.nsynth.wavenet.nsynth_generate',
61 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',
62 'magenta.models.performance_rnn.performance_rnn_create_dataset',
63 'magenta.models.performance_rnn.performance_rnn_generate',
64 'magenta.models.performance_rnn.performance_rnn_train',
65 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',
66 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',
67 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',
68 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
69 'magenta.models.polyphony_rnn.polyphony_rnn_generate',
70 'magenta.models.polyphony_rnn.polyphony_rnn_train',
71 'magenta.models.rl_tuner.rl_tuner_train',
72 'magenta.models.sketch_rnn.sketch_rnn_train',
73 'magenta.scripts.convert_dir_to_note_sequences',
74 ]
75
76 setup(
77 name='magenta',
78 version=__version__, # pylint: disable=undefined-variable
79 description='Use machine learning to create art and music',
80 long_description='',
81 url='https://magenta.tensorflow.org/',
82 author='Google Inc.',
83 author_email='[email protected]',
84 license='Apache 2',
85 # PyPI package information.
86 classifiers=[
87 'Development Status :: 4 - Beta',
88 'Intended Audience :: Developers',
89 'Intended Audience :: Education',
90 'Intended Audience :: Science/Research',
91 'License :: OSI Approved :: Apache Software License',
92 'Programming Language :: Python :: 2.7',
93 'Programming Language :: Python :: 3',
94 'Topic :: Scientific/Engineering :: Mathematics',
95 'Topic :: Software Development :: Libraries :: Python Modules',
96 'Topic :: Software Development :: Libraries',
97 ],
98 keywords='tensorflow machine learning magenta music art',
99
100 packages=find_packages(),
101 install_requires=REQUIRED_PACKAGES,
102 entry_points={
103 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
104 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
105 },
106
107 include_package_data=True,
108 package_data={
109 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
110 },
111 )
112
[end of magenta/tools/pip/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py
--- a/magenta/tools/pip/setup.py
+++ b/magenta/tools/pip/setup.py
@@ -20,7 +20,8 @@
# executing __init__.py, which will end up requiring a bunch of dependencies to
# execute (e.g., tensorflow, pretty_midi, etc.).
# Makes the __version__ variable available.
-execfile('magenta/version.py')
+with open('magenta/version.py') as in_file:
+ exec(in_file.read())
REQUIRED_PACKAGES = [
| {"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -20,7 +20,8 @@\n # executing __init__.py, which will end up requiring a bunch of dependencies to\n # execute (e.g., tensorflow, pretty_midi, etc.).\n # Makes the __version__ variable available.\n-execfile('magenta/version.py')\n+with open('magenta/version.py') as in_file:\n+ exec(in_file.read())\n \n \n REQUIRED_PACKAGES = [\n", "issue": "execfile() was removed from Python 3\nhttps://github.com/tensorflow/magenta/blob/master/magenta/tools/pip/setup.py#L23\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 
'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py"}]} | 1,914 | 129 |
gh_patches_debug_18735 | rasdani/github-patches | git_diff | openfun__marsha-1060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create XAPI statements for live video
## Feature Request
**Is your feature request related to a problem or unsupported use case? Please describe.**
When a video is a live all the existing XAPI statement are sent like a regular videos. Some events should not be sent and some data can't be computed
**Describe the solution you'd like**
Change the activity-type to `http://id.tincanapi.com/activitytype/webinar`
Send statement for those events :
- initialized
- play
- pause
- interacted
Also, do not send video length info, we can't have it. The completion threshold can not be computed too.
</issue>
<code>
[start of src/backend/marsha/core/xapi.py]
1 """XAPI module."""
2 import re
3 import uuid
4
5 from django.conf import settings
6 from django.utils import timezone
7 from django.utils.translation import to_locale
8
9 import requests
10
11
12 class XAPIStatement:
13 """Object to work on a XAPI Statement."""
14
15 statement = None
16
17 def __init__(self, video, statement, lti_user):
18 """Compute a valid xapi satement.
19
20 Parameters
21 ----------
22 video : Type[.models/videos]
23 The video object used in the xAPI statement
24
25 statement : dictionary
26 Statement containing base information to send to the LRS
27 An example of expected statement:
28 {
29 "verb": {
30 "id": "http://adlnet.gov/expapi/verbs/initialized",
31 "display": {
32 "en-US": "initialized"
33 }
34 },
35 "context": {
36 "extensions": {
37 "https://w3id.org/xapi/video/extensions/volume": 1,
38 "https://w3id.org/xapi/video/extensions/video-playback-size": "640x264",
39 }
40 }
41 }
42
43 lti_user : Type[lti.LTIUser]
44 Object representing data stored in the JWT Token and related to the user authenticated
45 with LTI
46
47 """
48 try:
49 user_id = lti_user.user.get("id")
50 except AttributeError:
51 user_id = lti_user.session_id
52
53 homepage = video.playlist.consumer_site.domain
54
55 if re.match(r"^http(s?):\/\/.*", homepage) is None:
56 homepage = f"http://{homepage}"
57
58 if "id" not in statement:
59 statement["id"] = str(uuid.uuid4())
60
61 statement["timestamp"] = timezone.now().isoformat()
62 statement["context"].update(
63 {"contextActivities": {"category": [{"id": "https://w3id.org/xapi/video"}]}}
64 )
65
66 statement["actor"] = {
67 "objectType": "Agent",
68 "account": {"name": user_id, "homePage": homepage},
69 }
70
71 statement["object"] = {
72 "definition": {
73 "type": "https://w3id.org/xapi/video/activity-type/video",
74 "name": {
75 to_locale(settings.LANGUAGE_CODE).replace("_", "-"): video.title
76 },
77 },
78 "id": "uuid://{id}".format(id=str(video.id)),
79 "objectType": "Activity",
80 }
81
82 object_extensions = {}
83 if lti_user.course.get("school_name") is not None:
84 object_extensions[
85 "https://w3id.org/xapi/acrossx/extensions/school"
86 ] = lti_user.course["school_name"]
87
88 if lti_user.course.get("course_name") is not None:
89 object_extensions[
90 "http://adlnet.gov/expapi/activities/course"
91 ] = lti_user.course["course_name"]
92
93 if lti_user.course.get("course_run") is not None:
94 object_extensions[
95 "http://adlnet.gov/expapi/activities/module"
96 ] = lti_user.course["course_run"]
97
98 if object_extensions:
99 statement["object"]["definition"]["extensions"] = object_extensions
100
101 self.statement = statement
102
103 def get_statement(self):
104 """Return the enriched statement."""
105 return self.statement
106
107
108 class XAPI:
109 """The XAPI object compute statements and send them to a LRS."""
110
111 def __init__(self, url, auth_token, xapi_version="1.0.3"):
112 """Initialize the XAPI module.
113
114 Parameters
115 ----------
116 url: string
117 The LRS endpoint to fetch
118
119 auth_token: string
120 The basic_auth token used to authenticate on the LRS
121
122 xapi_version: string
123 The xAPI version used.
124
125 """
126 self.url = url
127 self.auth_token = auth_token
128 self.xapi_version = xapi_version
129
130 def send(self, xapi_statement):
131 """Send the statement to a LRS.
132
133 Parameters
134 ----------
135 statement : Type[.XAPIStatement]
136
137 """
138 headers = {
139 "Authorization": self.auth_token,
140 "Content-Type": "application/json",
141 "X-Experience-API-Version": self.xapi_version,
142 }
143
144 response = requests.post(
145 self.url, json=xapi_statement.get_statement(), headers=headers
146 )
147
148 response.raise_for_status()
149
[end of src/backend/marsha/core/xapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/backend/marsha/core/xapi.py b/src/backend/marsha/core/xapi.py
--- a/src/backend/marsha/core/xapi.py
+++ b/src/backend/marsha/core/xapi.py
@@ -52,6 +52,12 @@
homepage = video.playlist.consumer_site.domain
+ activity_type = "https://w3id.org/xapi/video/activity-type/video"
+
+ # When the video is a live we change the activity to webinar
+ if video.live_state is not None:
+ activity_type = "http://id.tincanapi.com/activitytype/webinar"
+
if re.match(r"^http(s?):\/\/.*", homepage) is None:
homepage = f"http://{homepage}"
@@ -70,7 +76,7 @@
statement["object"] = {
"definition": {
- "type": "https://w3id.org/xapi/video/activity-type/video",
+ "type": activity_type,
"name": {
to_locale(settings.LANGUAGE_CODE).replace("_", "-"): video.title
},
| {"golden_diff": "diff --git a/src/backend/marsha/core/xapi.py b/src/backend/marsha/core/xapi.py\n--- a/src/backend/marsha/core/xapi.py\n+++ b/src/backend/marsha/core/xapi.py\n@@ -52,6 +52,12 @@\n \n homepage = video.playlist.consumer_site.domain\n \n+ activity_type = \"https://w3id.org/xapi/video/activity-type/video\"\n+\n+ # When the video is a live we change the activity to webinar\n+ if video.live_state is not None:\n+ activity_type = \"http://id.tincanapi.com/activitytype/webinar\"\n+\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n \n@@ -70,7 +76,7 @@\n \n statement[\"object\"] = {\n \"definition\": {\n- \"type\": \"https://w3id.org/xapi/video/activity-type/video\",\n+ \"type\": activity_type,\n \"name\": {\n to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\"): video.title\n },\n", "issue": "Create XAPI statements for live video\n## Feature Request\r\n\r\n**Is your feature request related to a problem or unsupported use case? Please describe.**\r\n\r\nWhen a video is a live all the existing XAPI statement are sent like a regular videos. Some events should not be sent and some data can't be computed\r\n\r\n**Describe the solution you'd like**\r\n\r\nChange the activity-type to `http://id.tincanapi.com/activitytype/webinar`\r\nSend statement for those events : \r\n- initialized\r\n- play\r\n- pause\r\n- interacted\r\n\r\nAlso, do not send video length info, we can't have it. The completion threshold can not be computed too.\r\n\n", "before_files": [{"content": "\"\"\"XAPI module.\"\"\"\nimport re\nimport uuid\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.translation import to_locale\n\nimport requests\n\n\nclass XAPIStatement:\n \"\"\"Object to work on a XAPI Statement.\"\"\"\n\n statement = None\n\n def __init__(self, video, statement, lti_user):\n \"\"\"Compute a valid xapi satement.\n\n Parameters\n ----------\n video : Type[.models/videos]\n The video object used in the xAPI statement\n\n statement : dictionary\n Statement containing base information to send to the LRS\n An example of expected statement:\n {\n \"verb\": {\n \"id\": \"http://adlnet.gov/expapi/verbs/initialized\",\n \"display\": {\n \"en-US\": \"initialized\"\n }\n },\n \"context\": {\n \"extensions\": {\n \"https://w3id.org/xapi/video/extensions/volume\": 1,\n \"https://w3id.org/xapi/video/extensions/video-playback-size\": \"640x264\",\n }\n }\n }\n\n lti_user : Type[lti.LTIUser]\n Object representing data stored in the JWT Token and related to the user authenticated\n with LTI\n\n \"\"\"\n try:\n user_id = lti_user.user.get(\"id\")\n except AttributeError:\n user_id = lti_user.session_id\n\n homepage = video.playlist.consumer_site.domain\n\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n if \"id\" not in statement:\n statement[\"id\"] = str(uuid.uuid4())\n\n statement[\"timestamp\"] = timezone.now().isoformat()\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": \"https://w3id.org/xapi/video\"}]}}\n )\n\n statement[\"actor\"] = {\n \"objectType\": \"Agent\",\n \"account\": {\"name\": user_id, \"homePage\": homepage},\n }\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": \"https://w3id.org/xapi/video/activity-type/video\",\n \"name\": {\n to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\"): video.title\n },\n },\n \"id\": \"uuid://{id}\".format(id=str(video.id)),\n \"objectType\": \"Activity\",\n }\n\n object_extensions = {}\n 
if lti_user.course.get(\"school_name\") is not None:\n object_extensions[\n \"https://w3id.org/xapi/acrossx/extensions/school\"\n ] = lti_user.course[\"school_name\"]\n\n if lti_user.course.get(\"course_name\") is not None:\n object_extensions[\n \"http://adlnet.gov/expapi/activities/course\"\n ] = lti_user.course[\"course_name\"]\n\n if lti_user.course.get(\"course_run\") is not None:\n object_extensions[\n \"http://adlnet.gov/expapi/activities/module\"\n ] = lti_user.course[\"course_run\"]\n\n if object_extensions:\n statement[\"object\"][\"definition\"][\"extensions\"] = object_extensions\n\n self.statement = statement\n\n def get_statement(self):\n \"\"\"Return the enriched statement.\"\"\"\n return self.statement\n\n\nclass XAPI:\n \"\"\"The XAPI object compute statements and send them to a LRS.\"\"\"\n\n def __init__(self, url, auth_token, xapi_version=\"1.0.3\"):\n \"\"\"Initialize the XAPI module.\n\n Parameters\n ----------\n url: string\n The LRS endpoint to fetch\n\n auth_token: string\n The basic_auth token used to authenticate on the LRS\n\n xapi_version: string\n The xAPI version used.\n\n \"\"\"\n self.url = url\n self.auth_token = auth_token\n self.xapi_version = xapi_version\n\n def send(self, xapi_statement):\n \"\"\"Send the statement to a LRS.\n\n Parameters\n ----------\n statement : Type[.XAPIStatement]\n\n \"\"\"\n headers = {\n \"Authorization\": self.auth_token,\n \"Content-Type\": \"application/json\",\n \"X-Experience-API-Version\": self.xapi_version,\n }\n\n response = requests.post(\n self.url, json=xapi_statement.get_statement(), headers=headers\n )\n\n response.raise_for_status()\n", "path": "src/backend/marsha/core/xapi.py"}]} | 1,974 | 239 |
gh_patches_debug_34117 | rasdani/github-patches | git_diff | ESMCI__cime-1090 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
case.lt_archive
The lt_archive script has several problems that prevent it from functioning.
</issue>
<code>
[start of utils/python/CIME/case_lt_archive.py]
1 from CIME.XML.standard_module_setup import *
2 from CIME.utils import expect, does_file_have_string, append_status
3 from CIME.XML.lt_archive import LTArchive
4
5 import time
6
7 logger = logging.getLogger(__name__)
8
9 ###############################################################################
10 def case_lt_archive(case):
11 ###############################################################################
12 caseroot = case.get_value("CASEROOT")
13
14 # max number of threads needed by scripts
15 os.environ["maxthrds"] = 1
16
17 # document start
18 append_status("lt_archive starting",caseroot=caseroot,sfile="CaseStatus")
19
20 # determine status of run and short term archiving
21 runComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
22 "run SUCCESSFUL")
23 staComplete = does_file_have_string(os.path.join(caseroot, "stArchiveStatus"),
24 "st_archive_complete")
25
26 # set up envrionment vars and call the lt_archive.sh script
27 if runComplete and staComplete:
28 os.environ["DOUT_S_ROOT"] = case.get_value("DOUT_S_ROOT")
29 os.environ["DOUT_L_MSROOT"] = case.get_value("DOUT_L_MSROOT")
30 os.environ["DOUT_L_HPSS_ACCNT"] = case.get_value("DOUT_L_HPSS_ACCNT")
31
32 lid = time.strftime("%y%m%d-%H%M%S")
33 lt_archive = LTArchive(case.get_value("MACH"))
34 lt_archive_args = lt_archive.get_lt_archive_args()
35 cmd = os.path.join(caseroot, "Tools/lt_archive.sh") \
36 + lt_archive_args + "ltArchiveStatus." + lid + " 2>&1"
37 run_cmd_no_fail(cmd, from_dir=caseroot)
38 else:
39 expect(False,
40 "lt_archive: run or st_archive is not yet complete or was not successful."
41 "Unable to perform long term archive...")
42
43 # document completion
44 append_status("lt_archive completed" ,caseroot=caseroot, sfile="CaseStatus")
45
46 return True
47
[end of utils/python/CIME/case_lt_archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/case_lt_archive.py b/utils/python/CIME/case_lt_archive.py
--- a/utils/python/CIME/case_lt_archive.py
+++ b/utils/python/CIME/case_lt_archive.py
@@ -12,17 +12,16 @@
caseroot = case.get_value("CASEROOT")
# max number of threads needed by scripts
- os.environ["maxthrds"] = 1
+ os.environ["maxthrds"] = "1"
# document start
append_status("lt_archive starting",caseroot=caseroot,sfile="CaseStatus")
# determine status of run and short term archiving
runComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
- "run SUCCESSFUL")
- staComplete = does_file_have_string(os.path.join(caseroot, "stArchiveStatus"),
- "st_archive_complete")
-
+ "Run SUCCESSFUL")
+ staComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
+ "st_archiving completed")
# set up envrionment vars and call the lt_archive.sh script
if runComplete and staComplete:
os.environ["DOUT_S_ROOT"] = case.get_value("DOUT_S_ROOT")
@@ -32,10 +31,13 @@
lid = time.strftime("%y%m%d-%H%M%S")
lt_archive = LTArchive(case.get_value("MACH"))
lt_archive_args = lt_archive.get_lt_archive_args()
- cmd = os.path.join(caseroot, "Tools/lt_archive.sh") \
+ if lt_archive_args is None:
+ lt_archive_args = " "
+ cmd = os.path.join(caseroot, "Tools", "lt_archive.sh") \
+ lt_archive_args + "ltArchiveStatus." + lid + " 2>&1"
run_cmd_no_fail(cmd, from_dir=caseroot)
else:
+ logger.warn("runComplete %s staComplete %s"%(runComplete, staComplete))
expect(False,
"lt_archive: run or st_archive is not yet complete or was not successful."
"Unable to perform long term archive...")
| {"golden_diff": "diff --git a/utils/python/CIME/case_lt_archive.py b/utils/python/CIME/case_lt_archive.py\n--- a/utils/python/CIME/case_lt_archive.py\n+++ b/utils/python/CIME/case_lt_archive.py\n@@ -12,17 +12,16 @@\n caseroot = case.get_value(\"CASEROOT\")\n \n # max number of threads needed by scripts\n- os.environ[\"maxthrds\"] = 1\n+ os.environ[\"maxthrds\"] = \"1\"\n \n # document start\n append_status(\"lt_archive starting\",caseroot=caseroot,sfile=\"CaseStatus\")\n \n # determine status of run and short term archiving\n runComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n- \"run SUCCESSFUL\")\n- staComplete = does_file_have_string(os.path.join(caseroot, \"stArchiveStatus\"),\n- \"st_archive_complete\")\n-\n+ \"Run SUCCESSFUL\")\n+ staComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n+ \"st_archiving completed\")\n # set up envrionment vars and call the lt_archive.sh script\n if runComplete and staComplete:\n os.environ[\"DOUT_S_ROOT\"] = case.get_value(\"DOUT_S_ROOT\")\n@@ -32,10 +31,13 @@\n lid = time.strftime(\"%y%m%d-%H%M%S\")\n lt_archive = LTArchive(case.get_value(\"MACH\"))\n lt_archive_args = lt_archive.get_lt_archive_args()\n- cmd = os.path.join(caseroot, \"Tools/lt_archive.sh\") \\\n+ if lt_archive_args is None:\n+ lt_archive_args = \" \"\n+ cmd = os.path.join(caseroot, \"Tools\", \"lt_archive.sh\") \\\n + lt_archive_args + \"ltArchiveStatus.\" + lid + \" 2>&1\"\n run_cmd_no_fail(cmd, from_dir=caseroot)\n else:\n+ logger.warn(\"runComplete %s staComplete %s\"%(runComplete, staComplete))\n expect(False,\n \"lt_archive: run or st_archive is not yet complete or was not successful.\"\n \"Unable to perform long term archive...\")\n", "issue": "case.lt_archive\nlt_archive script has several problems preventing functionality. 
\n", "before_files": [{"content": "from CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, does_file_have_string, append_status\nfrom CIME.XML.lt_archive import LTArchive\n\nimport time\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef case_lt_archive(case):\n###############################################################################\n caseroot = case.get_value(\"CASEROOT\")\n\n # max number of threads needed by scripts\n os.environ[\"maxthrds\"] = 1\n\n # document start\n append_status(\"lt_archive starting\",caseroot=caseroot,sfile=\"CaseStatus\")\n\n # determine status of run and short term archiving\n runComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n \"run SUCCESSFUL\")\n staComplete = does_file_have_string(os.path.join(caseroot, \"stArchiveStatus\"),\n \"st_archive_complete\")\n\n # set up envrionment vars and call the lt_archive.sh script\n if runComplete and staComplete:\n os.environ[\"DOUT_S_ROOT\"] = case.get_value(\"DOUT_S_ROOT\")\n os.environ[\"DOUT_L_MSROOT\"] = case.get_value(\"DOUT_L_MSROOT\")\n os.environ[\"DOUT_L_HPSS_ACCNT\"] = case.get_value(\"DOUT_L_HPSS_ACCNT\")\n\n lid = time.strftime(\"%y%m%d-%H%M%S\")\n lt_archive = LTArchive(case.get_value(\"MACH\"))\n lt_archive_args = lt_archive.get_lt_archive_args()\n cmd = os.path.join(caseroot, \"Tools/lt_archive.sh\") \\\n + lt_archive_args + \"ltArchiveStatus.\" + lid + \" 2>&1\"\n run_cmd_no_fail(cmd, from_dir=caseroot)\n else:\n expect(False,\n \"lt_archive: run or st_archive is not yet complete or was not successful.\"\n \"Unable to perform long term archive...\")\n\n # document completion\n append_status(\"lt_archive completed\" ,caseroot=caseroot, sfile=\"CaseStatus\")\n\n return True\n", "path": "utils/python/CIME/case_lt_archive.py"}]} | 1,077 | 482 |
gh_patches_debug_37636 | rasdani/github-patches | git_diff | doccano__doccano-1222 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Enhancement request] Meaningful error on labels naming conflict
Feature description
---------
Try renaming a label to an existing name.
You get a 500 error.
Desired: a meaningful error.
Related: #601, #826.
</issue>
<code>
[start of app/api/views/label.py]
1 import json
2
3 from django.db import IntegrityError, transaction
4 from django.shortcuts import get_object_or_404
5 from rest_framework import generics, status
6 from rest_framework.exceptions import ParseError
7 from rest_framework.parsers import MultiPartParser
8 from rest_framework.permissions import IsAuthenticated
9 from rest_framework.response import Response
10 from rest_framework.views import APIView
11
12 from ..models import Label, Project
13 from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin
14 from ..serializers import LabelSerializer
15
16
17 class LabelList(generics.ListCreateAPIView):
18 serializer_class = LabelSerializer
19 pagination_class = None
20 permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
21
22 def get_queryset(self):
23 project = get_object_or_404(Project, pk=self.kwargs['project_id'])
24 return project.labels
25
26 def perform_create(self, serializer):
27 project = get_object_or_404(Project, pk=self.kwargs['project_id'])
28 serializer.save(project=project)
29
30
31 class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
32 queryset = Label.objects.all()
33 serializer_class = LabelSerializer
34 lookup_url_kwarg = 'label_id'
35 permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
36
37
38 class LabelUploadAPI(APIView):
39 parser_classes = (MultiPartParser,)
40 permission_classes = [IsAuthenticated & IsProjectAdmin]
41
42 @transaction.atomic
43 def post(self, request, *args, **kwargs):
44 if 'file' not in request.data:
45 raise ParseError('Empty content')
46 labels = json.load(request.data['file'])
47 project = get_object_or_404(Project, pk=kwargs['project_id'])
48 try:
49 for label in labels:
50 serializer = LabelSerializer(data=label)
51 serializer.is_valid(raise_exception=True)
52 serializer.save(project=project)
53 return Response(status=status.HTTP_201_CREATED)
54 except IntegrityError:
55 content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
56 return Response(content, status=status.HTTP_400_BAD_REQUEST)
57
[end of app/api/views/label.py]
[start of app/api/exceptions.py]
1 from rest_framework import status
2 from rest_framework.exceptions import (APIException, PermissionDenied,
3 ValidationError)
4
5
6 class FileParseException(APIException):
7 status_code = status.HTTP_400_BAD_REQUEST
8 default_detail = 'Invalid file format, line {}: {}'
9 default_code = 'invalid'
10
11 def __init__(self, line_num, line, code=None):
12 detail = self.default_detail.format(line_num, line)
13 super().__init__(detail, code)
14
15
16 class AutoLabelingException(APIException):
17 status_code = status.HTTP_400_BAD_REQUEST
18 default_detail = 'Auto labeling not allowed for the document with labels.'
19
20
21 class AutoLabeliingPermissionDenied(PermissionDenied):
22 default_detail = 'You do not have permission to perform auto labeling.' \
23 'Please ask the project administrators to add you.'
24
25
26 class URLConnectionError(ValidationError):
27 default_detail = 'Failed to establish a connection. Please check the URL or network.'
28
29
30 class AWSTokenError(ValidationError):
31 default_detail = 'The security token included in the request is invalid.'
32
33
34 class SampleDataException(ValidationError):
35 default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \
36 'Please specify another sample data which returns at least one label.'
37
[end of app/api/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/exceptions.py b/app/api/exceptions.py
--- a/app/api/exceptions.py
+++ b/app/api/exceptions.py
@@ -34,3 +34,8 @@
class SampleDataException(ValidationError):
default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \
'Please specify another sample data which returns at least one label.'
+
+
+class LabelValidationError(APIException):
+ status_code = status.HTTP_400_BAD_REQUEST
+ default_detail = 'You cannot create a label with same name or shortcut key.'
diff --git a/app/api/views/label.py b/app/api/views/label.py
--- a/app/api/views/label.py
+++ b/app/api/views/label.py
@@ -9,6 +9,7 @@
from rest_framework.response import Response
from rest_framework.views import APIView
+from ..exceptions import LabelValidationError
from ..models import Label, Project
from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin
from ..serializers import LabelSerializer
@@ -27,6 +28,11 @@
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
serializer.save(project=project)
+ def delete(self, request, *args, **kwargs):
+ delete_ids = request.data['ids']
+ Label.objects.filter(pk__in=delete_ids).delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Label.objects.all()
@@ -43,14 +49,14 @@
def post(self, request, *args, **kwargs):
if 'file' not in request.data:
raise ParseError('Empty content')
- labels = json.load(request.data['file'])
project = get_object_or_404(Project, pk=kwargs['project_id'])
try:
- for label in labels:
- serializer = LabelSerializer(data=label)
- serializer.is_valid(raise_exception=True)
- serializer.save(project=project)
+ labels = json.load(request.data['file'])
+ serializer = LabelSerializer(data=labels, many=True)
+ serializer.is_valid(raise_exception=True)
+ serializer.save(project=project)
return Response(status=status.HTTP_201_CREATED)
+ except json.decoder.JSONDecodeError:
+ raise ParseError('The file format is invalid.')
except IntegrityError:
- content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
- return Response(content, status=status.HTTP_400_BAD_REQUEST)
+ raise LabelValidationError
| {"golden_diff": "diff --git a/app/api/exceptions.py b/app/api/exceptions.py\n--- a/app/api/exceptions.py\n+++ b/app/api/exceptions.py\n@@ -34,3 +34,8 @@\n class SampleDataException(ValidationError):\n default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \\\n 'Please specify another sample data which returns at least one label.'\n+\n+\n+class LabelValidationError(APIException):\n+ status_code = status.HTTP_400_BAD_REQUEST\n+ default_detail = 'You cannot create a label with same name or shortcut key.'\ndiff --git a/app/api/views/label.py b/app/api/views/label.py\n--- a/app/api/views/label.py\n+++ b/app/api/views/label.py\n@@ -9,6 +9,7 @@\n from rest_framework.response import Response\n from rest_framework.views import APIView\n \n+from ..exceptions import LabelValidationError\n from ..models import Label, Project\n from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin\n from ..serializers import LabelSerializer\n@@ -27,6 +28,11 @@\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n serializer.save(project=project)\n \n+ def delete(self, request, *args, **kwargs):\n+ delete_ids = request.data['ids']\n+ Label.objects.filter(pk__in=delete_ids).delete()\n+ return Response(status=status.HTTP_204_NO_CONTENT)\n+\n \n class LabelDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Label.objects.all()\n@@ -43,14 +49,14 @@\n def post(self, request, *args, **kwargs):\n if 'file' not in request.data:\n raise ParseError('Empty content')\n- labels = json.load(request.data['file'])\n project = get_object_or_404(Project, pk=kwargs['project_id'])\n try:\n- for label in labels:\n- serializer = LabelSerializer(data=label)\n- serializer.is_valid(raise_exception=True)\n- serializer.save(project=project)\n+ labels = json.load(request.data['file'])\n+ serializer = LabelSerializer(data=labels, many=True)\n+ serializer.is_valid(raise_exception=True)\n+ serializer.save(project=project)\n return Response(status=status.HTTP_201_CREATED)\n+ except json.decoder.JSONDecodeError:\n+ raise ParseError('The file format is invalid.')\n except IntegrityError:\n- content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}\n- return Response(content, status=status.HTTP_400_BAD_REQUEST)\n+ raise LabelValidationError\n", "issue": "[Enhancement request] Meaningful error on labels naming conflict\nFeature description\r\n---------\r\nTry rename a label to an existing name.\r\n\r\nYou get a 500 error.\r\n\r\nDesired: a meaningful error.\r\n\r\nRelated: #601, #826.\n", "before_files": [{"content": "import json\n\nfrom django.db import IntegrityError, transaction\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, status\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom ..models import Label, Project\nfrom ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin\nfrom ..serializers import LabelSerializer\n\n\nclass LabelList(generics.ListCreateAPIView):\n serializer_class = LabelSerializer\n pagination_class = None\n permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]\n\n def get_queryset(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n return project.labels\n\n def perform_create(self, serializer):\n project = get_object_or_404(Project, 
pk=self.kwargs['project_id'])\n serializer.save(project=project)\n\n\nclass LabelDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Label.objects.all()\n serializer_class = LabelSerializer\n lookup_url_kwarg = 'label_id'\n permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]\n\n\nclass LabelUploadAPI(APIView):\n parser_classes = (MultiPartParser,)\n permission_classes = [IsAuthenticated & IsProjectAdmin]\n\n @transaction.atomic\n def post(self, request, *args, **kwargs):\n if 'file' not in request.data:\n raise ParseError('Empty content')\n labels = json.load(request.data['file'])\n project = get_object_or_404(Project, pk=kwargs['project_id'])\n try:\n for label in labels:\n serializer = LabelSerializer(data=label)\n serializer.is_valid(raise_exception=True)\n serializer.save(project=project)\n return Response(status=status.HTTP_201_CREATED)\n except IntegrityError:\n content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n", "path": "app/api/views/label.py"}, {"content": "from rest_framework import status\nfrom rest_framework.exceptions import (APIException, PermissionDenied,\n ValidationError)\n\n\nclass FileParseException(APIException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = 'Invalid file format, line {}: {}'\n default_code = 'invalid'\n\n def __init__(self, line_num, line, code=None):\n detail = self.default_detail.format(line_num, line)\n super().__init__(detail, code)\n\n\nclass AutoLabelingException(APIException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = 'Auto labeling not allowed for the document with labels.'\n\n\nclass AutoLabeliingPermissionDenied(PermissionDenied):\n default_detail = 'You do not have permission to perform auto labeling.' \\\n 'Please ask the project administrators to add you.'\n\n\nclass URLConnectionError(ValidationError):\n default_detail = 'Failed to establish a connection. Please check the URL or network.'\n\n\nclass AWSTokenError(ValidationError):\n default_detail = 'The security token included in the request is invalid.'\n\n\nclass SampleDataException(ValidationError):\n default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \\\n 'Please specify another sample data which returns at least one label.'\n", "path": "app/api/exceptions.py"}]} | 1,500 | 578 |
gh_patches_debug_37307 | rasdani/github-patches | git_diff | bridgecrewio__checkov-464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scanning IAM policy only takes First SID in json rather than looping through
**Describe the bug**
It seems that when specifying more than one SID in a JSON policy, the check does not loop through each statement; rather, it just looks at the first one and stops.
**To Reproduce**
Steps to reproduce the behavior:
1. Create policy with more than one SID
`{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SqsAllow",
"Effect": "Allow",
"Action": [
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"sqs:ListDeadLetterSourceQueues",
"sqs:ListQueues",
"sqs:ReceiveMessage",
"sqs:SendMessage",
"sqs:SendMessageBatch"
],
"Resource": "*"
},
{
"Sid": "ALL",
"Effect": "Allow",
"Action": [ "*"
],
"Resource": ["*"]
},`
2. Run Checkov against policy
**Expected behavior**
I would expect the scan to check each statement in the policy rather than only the first one
**Desktop (please complete the following information):**
- OS: Mac
- Checkov Version: 1.0.442
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 import json
4
5
6 class IAMStarActionPolicyDocument(BaseResourceCheck):
7
8 def __init__(self):
9 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions"
10 id = "CKV_AWS_63"
11 supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']
12 categories = [CheckCategories.IAM]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 if 'policy' in conf.keys():
17 try:
18 policy_block = json.loads(conf['policy'][0])
19 if 'Statement' in policy_block.keys():
20 if 'Action' in policy_block['Statement'][0] and \
21 policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
22 policy_block['Statement'][0]['Action'][0] == "*":
23 return CheckResult.FAILED
24 except: # nosec
25 pass
26 return CheckResult.PASSED
27
28
29 check = IAMStarActionPolicyDocument()
30
[end of checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py]
[start of checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 import json
4
5
6 class IAMAdminPolicyDocument(BaseResourceCheck):
7
8 def __init__(self):
9 name = "Ensure IAM policies that allow full \"*-*\" administrative privileges are not created"
10 id = "CKV_AWS_62"
11 supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']
12 categories = [CheckCategories.IAM]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 if 'policy' in conf.keys():
17 try:
18 policy_block = json.loads(conf['policy'][0])
19 if 'Statement' in policy_block.keys():
20 if 'Action' in policy_block['Statement'][0] and \
21 policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
22 policy_block['Statement'][0]['Action'][0] == "*" and \
23 'Resource' in policy_block['Statement'][0] and \
24 policy_block['Statement'][0]['Resource'] == '*':
25 return CheckResult.FAILED
26 except: # nosec
27 pass
28 return CheckResult.PASSED
29
30
31 check = IAMAdminPolicyDocument()
32
[end of checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py]
[start of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]
1 from checkov.terraform.checks.data.base_check import BaseDataCheck
2 from checkov.common.models.enums import CheckResult, CheckCategories
3
4
5 class StarActionPolicyDocument(BaseDataCheck):
6 def __init__(self):
7 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions"
8 id = "CKV_AWS_49"
9 supported_data = ['aws_iam_policy_document']
10 categories = [CheckCategories.IAM]
11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
12
13 def scan_data_conf(self, conf):
14 """
15 validates iam policy document
16 https://learn.hashicorp.com/terraform/aws/iam-policy
17 :param conf: aws_kms_key configuration
18 :return: <CheckResult>
19 """
20 key = 'statement'
21 if key in conf.keys():
22 for statement in conf['statement']:
23 if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26
27
28 check = StarActionPolicyDocument()
29
[end of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
@@ -19,7 +19,7 @@
"""
key = 'statement'
if key in conf.keys():
- for statement in conf['statement']:
+ for statement in conf[key]:
if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
--- a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
+++ b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
@@ -17,13 +17,13 @@
try:
policy_block = json.loads(conf['policy'][0])
if 'Statement' in policy_block.keys():
- if 'Action' in policy_block['Statement'][0] and \
- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
- policy_block['Statement'][0]['Action'][0] == "*" and \
- 'Resource' in policy_block['Statement'][0] and \
- policy_block['Statement'][0]['Resource'] == '*':
+ for statement in policy_block['Statement']:
+ if 'Action' in statement and \
+ statement.get('Effect', ['Allow']) == 'Allow' and \
+ '*' in statement.get('Action', ['']) and \
+ '*' in statement.get('Resource', ['']):
return CheckResult.FAILED
- except: # nosec
+ except: # nosec
pass
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
--- a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
+++ b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
@@ -17,9 +17,10 @@
try:
policy_block = json.loads(conf['policy'][0])
if 'Statement' in policy_block.keys():
- if 'Action' in policy_block['Statement'][0] and \
- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
- policy_block['Statement'][0]['Action'][0] == "*":
+ for statement in policy_block['Statement']:
+ if 'Action' in statement and \
+ statement.get('Effect', ['Allow']) == 'Allow' and \
+ '*' in statement.get('Action', ['']):
return CheckResult.FAILED
except: # nosec
pass
| {"golden_diff": "diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n@@ -19,7 +19,7 @@\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n- for statement in conf['statement']:\n+ for statement in conf[key]:\n if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\ndiff --git a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n--- a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n+++ b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n@@ -17,13 +17,13 @@\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n- if 'Action' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n- policy_block['Statement'][0]['Action'][0] == \"*\" and \\\n- 'Resource' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0]['Resource'] == '*':\n+ for statement in policy_block['Statement']:\n+ if 'Action' in statement and \\\n+ statement.get('Effect', ['Allow']) == 'Allow' and \\\n+ '*' in statement.get('Action', ['']) and \\\n+ '*' in statement.get('Resource', ['']):\n return CheckResult.FAILED\n- except: # nosec\n+ except: # nosec\n pass\n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n--- a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n@@ -17,9 +17,10 @@\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n- if 'Action' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n- policy_block['Statement'][0]['Action'][0] == \"*\":\n+ for statement in policy_block['Statement']:\n+ if 'Action' in statement and \\\n+ statement.get('Effect', ['Allow']) == 'Allow' and \\\n+ '*' in statement.get('Action', ['']):\n return CheckResult.FAILED\n except: # nosec\n pass\n", "issue": "Scanning IAM policy only takes First SID in json rather than looping through\n**Describe the bug**\r\nIt seems when specifying more than one SID in a json, the policies do not loop through each one rather it just looks at the first one and ends. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create policy with more than one SID\r\n`{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"SqsAllow\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [\r\n \"sqs:GetQueueAttributes\",\r\n \"sqs:GetQueueUrl\",\r\n \"sqs:ListDeadLetterSourceQueues\",\r\n \"sqs:ListQueues\",\r\n \"sqs:ReceiveMessage\",\r\n \"sqs:SendMessage\",\r\n \"sqs:SendMessageBatch\"\r\n ],\r\n \"Resource\": \"*\"\r\n },\r\n {\r\n \"Sid\": \"ALL\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [ \"*\"\r\n ],\r\n \"Resource\": [\"*\"]\r\n },`\r\n2. 
Run Checkov against policy\r\n\r\n\r\n**Expected behavior**\r\nI would expect the scan to check each json within the policy rather than the first one\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Mac\r\n - Checkov Version: 1.0.442\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nimport json\n\n\nclass IAMStarActionPolicyDocument(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_63\"\n supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'policy' in conf.keys():\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n if 'Action' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n policy_block['Statement'][0]['Action'][0] == \"*\":\n return CheckResult.FAILED\n except: # nosec\n pass\n return CheckResult.PASSED\n\n\ncheck = IAMStarActionPolicyDocument()\n", "path": "checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nimport json\n\n\nclass IAMAdminPolicyDocument(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n id = \"CKV_AWS_62\"\n supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'policy' in conf.keys():\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n if 'Action' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n policy_block['Statement'][0]['Action'][0] == \"*\" and \\\n 'Resource' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0]['Resource'] == '*':\n return CheckResult.FAILED\n except: # nosec\n pass\n return CheckResult.PASSED\n\n\ncheck = IAMAdminPolicyDocument()\n", "path": "checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py"}, {"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass StarActionPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_49\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in 
conf['statement']:\n if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = StarActionPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/StarActionPolicyDocument.py"}]} | 1,892 | 676 |
gh_patches_debug_12976 | rasdani/github-patches | git_diff | urllib3__urllib3-2042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
urllib3 logo is unreadable in docs in dark mode
This is a recent Furo addition; you can see it in this pull request build: https://urllib3--2026.org.readthedocs.build/en/2026/index.html. Here's what I see (with Firefox on macOS with dark mode enabled):
<img width="237" alt="urllib3 logo in dark mode in docs" src="https://user-images.githubusercontent.com/42327/96408490-ad2c8300-11f4-11eb-8054-661fb38a6c23.png">
I'm not sure what the correct fix is here. The obvious one would be to force a white background. I guess we could also... add a dark mode urllib3 logo, by switching black letters to white?
(The rest of the content looks good, even if the contrast seems low to me.)
</issue>
<code>
[start of docs/conf.py]
1 import os
2 import sys
3 from datetime import date
4
5 # If extensions (or modules to document with autodoc) are in another directory,
6 # add these directories to sys.path here. If the directory is relative to the
7 # documentation root, use os.path.abspath to make it absolute, like shown here.
8
9 root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
10 sys.path.insert(0, root_path)
11
12 # Mock some expensive/platform-specific modules so build will work.
13 # (https://read-the-docs.readthedocs.io/en/latest/faq.html#\
14 # i-get-import-errors-on-libraries-that-depend-on-c-modules)
15 from unittest import mock
16
17
18 class MockModule(mock.Mock):
19 @classmethod
20 def __getattr__(cls, name):
21 return MockModule()
22
23
24 MOCK_MODULES = ("ntlm",)
25
26 sys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)
27
28
29 import urllib3
30
31 # -- General configuration -----------------------------------------------------
32
33
34 # Add any Sphinx extension module names here, as strings. They can be extensions
35 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.doctest",
39 "sphinx.ext.intersphinx",
40 ]
41
42 # Test code blocks only when explicitly specified
43 doctest_test_doctest_blocks = ""
44
45 # Add any paths that contain templates here, relative to this directory.
46 templates_path = ["_templates"]
47
48 # The suffix of source filenames.
49 source_suffix = ".rst"
50
51 # The master toctree document.
52 master_doc = "index"
53
54 # General information about the project.
55 project = "urllib3"
56 copyright = f"{date.today().year}, Andrey Petrov"
57
58 # The short X.Y version.
59 version = urllib3.__version__
60 # The full version, including alpha/beta/rc tags.
61 release = version
62
63 # List of patterns, relative to source directory, that match files and
64 # directories to ignore when looking for source files.
65 exclude_patterns = ["_build"]
66
67 # The name of the Pygments (syntax highlighting) style to use.
68 pygments_style = "friendly"
69
70 # The theme to use for HTML and HTML Help pages. See the documentation for
71 # a list of builtin themes.
72 html_theme = "furo"
73 html_favicon = "images/favicon.png"
74 html_logo = "images/banner.svg"
75
76 html_theme_options = {
77 "announcement": """
78 <a style=\"text-decoration: none; color: white;\"
79 href=\"https://opencollective.com/urllib3\">
80 <img src=\"/en/latest/_static/favicon.png\"/> Sponsor urllib3 v2.0 on Open Collective
81 </a>
82 """,
83 "sidebar_hide_name": True,
84 }
85
86 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
87
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -73,8 +73,8 @@
# a list of builtin themes.
html_theme = "furo"
html_favicon = "images/favicon.png"
-html_logo = "images/banner.svg"
+html_static_path = ["_static"]
html_theme_options = {
"announcement": """
<a style=\"text-decoration: none; color: white;\"
@@ -83,6 +83,8 @@
</a>
""",
"sidebar_hide_name": True,
+ "light_logo": "banner.svg",
+ "dark_logo": "dark-logo.svg",
}
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -73,8 +73,8 @@\n # a list of builtin themes.\n html_theme = \"furo\"\n html_favicon = \"images/favicon.png\"\n-html_logo = \"images/banner.svg\"\n \n+html_static_path = [\"_static\"]\n html_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n@@ -83,6 +83,8 @@\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n+ \"light_logo\": \"banner.svg\",\n+ \"dark_logo\": \"dark-logo.svg\",\n }\n \n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n", "issue": "urllib3 logo is unreadable in docs in dark mode\nThis is a recent Furo addition, you can see it in this pull request build: https://urllib3--2026.org.readthedocs.build/en/2026/index.html. Here's what I see (with Firefox on macOS with dark mode enabled):\r\n\r\n<img width=\"237\" alt=\"urllib3 logo in dark mode in docs\" src=\"https://user-images.githubusercontent.com/42327/96408490-ad2c8300-11f4-11eb-8054-661fb38a6c23.png\">\r\n\r\nI'm not sure what the correct fix is here. The obvious one would be to force a white background. I guess we could also... add a dark mode urllib3 logo, by switching black letters to white?\r\n\r\n(The rest of the content looks good, even if the contrast seems low to me.)\n", "before_files": [{"content": "import os\nimport sys\nfrom datetime import date\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_path)\n\n# Mock some expensive/platform-specific modules so build will work.\n# (https://read-the-docs.readthedocs.io/en/latest/faq.html#\\\n# i-get-import-errors-on-libraries-that-depend-on-c-modules)\nfrom unittest import mock\n\n\nclass MockModule(mock.Mock):\n @classmethod\n def __getattr__(cls, name):\n return MockModule()\n\n\nMOCK_MODULES = (\"ntlm\",)\n\nsys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)\n\n\nimport urllib3\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n]\n\n# Test code blocks only when explicitly specified\ndoctest_test_doctest_blocks = \"\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"urllib3\"\ncopyright = f\"{date.today().year}, Andrey Petrov\"\n\n# The short X.Y version.\nversion = urllib3.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\nhtml_favicon = \"images/favicon.png\"\nhtml_logo = \"images/banner.svg\"\n\nhtml_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n href=\\\"https://opencollective.com/urllib3\\\">\n <img src=\\\"/en/latest/_static/favicon.png\\\"/> Sponsor urllib3 v2.0 on Open Collective\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n}\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n", "path": "docs/conf.py"}]} | 1,517 | 172 |
gh_patches_debug_31822 | rasdani/github-patches | git_diff | TencentBlueKing__bk-user-805 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Startup command: make gunicorn print a stack trace before exiting
When gunicorn exits abruptly, the approach at https://stackoverflow.com/questions/57167240/is-it-possible-to-get-a-stack-trace-when-a-gunicorn-worker-hits-a-timeout can be used for debugging: print the stack trace before exiting, so the location of the problematic code can be inferred.
</issue>
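For context, the Stack Overflow approach referenced above amounts to enabling Python's `faulthandler` module in the WSGI entry point: when gunicorn aborts a worker (it sends SIGABRT on timeout), the handler dumps the current Python traceback before the process dies, which reveals where the code was stuck. A minimal sketch of such an entry point (the Django settings path here is illustrative):

```python
# wsgi.py - enable faulthandler before the application is created, so a
# SIGABRT sent by gunicorn (e.g. on worker timeout) prints the Python traceback.
import faulthandler
import os

from django.core.wsgi import get_wsgi_application

faulthandler.enable()  # installs handlers for SIGABRT/SIGSEGV/... that dump the stack

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # illustrative path

application = get_wsgi_application()
```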
<code>
[start of src/login/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
4 Community Edition) available.
5 Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
6 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at http://opensource.org/licenses/MIT
8 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10 specific language governing permissions and limitations under the License.
11 """
12
13 import os
14
15 from dj_static import Cling
16 from django.core.wsgi import get_wsgi_application
17
18 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bklogin.config.prod")
19
20 application = Cling(get_wsgi_application())
21
[end of src/login/wsgi.py]
[start of src/saas/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 import os
12
13 from django.core.wsgi import get_wsgi_application
14
15 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_shell.config.overlays.prod")
16
17 application = get_wsgi_application()
18
[end of src/saas/wsgi.py]
[start of src/api/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 import os
12
13 from django.core.wsgi import get_wsgi_application
14
15 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_core.config.overlays.prod")
16
17 application = get_wsgi_application()
18
[end of src/api/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/wsgi.py b/src/api/wsgi.py
--- a/src/api/wsgi.py
+++ b/src/api/wsgi.py
@@ -8,10 +8,13 @@
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_core.config.overlays.prod")
application = get_wsgi_application()
diff --git a/src/login/wsgi.py b/src/login/wsgi.py
--- a/src/login/wsgi.py
+++ b/src/login/wsgi.py
@@ -10,11 +10,14 @@
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bklogin.config.prod")
application = Cling(get_wsgi_application())
diff --git a/src/saas/wsgi.py b/src/saas/wsgi.py
--- a/src/saas/wsgi.py
+++ b/src/saas/wsgi.py
@@ -8,10 +8,13 @@
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_shell.config.overlays.prod")
application = get_wsgi_application()
| {"golden_diff": "diff --git a/src/api/wsgi.py b/src/api/wsgi.py\n--- a/src/api/wsgi.py\n+++ b/src/api/wsgi.py\n@@ -8,10 +8,13 @@\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n \"\"\"\n+import faulthandler\n import os\n \n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_core.config.overlays.prod\")\n \n application = get_wsgi_application()\ndiff --git a/src/login/wsgi.py b/src/login/wsgi.py\n--- a/src/login/wsgi.py\n+++ b/src/login/wsgi.py\n@@ -10,11 +10,14 @@\n specific language governing permissions and limitations under the License.\n \"\"\"\n \n+import faulthandler\n import os\n \n from dj_static import Cling\n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bklogin.config.prod\")\n \n application = Cling(get_wsgi_application())\ndiff --git a/src/saas/wsgi.py b/src/saas/wsgi.py\n--- a/src/saas/wsgi.py\n+++ b/src/saas/wsgi.py\n@@ -8,10 +8,13 @@\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n \"\"\"\n+import faulthandler\n import os\n \n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_shell.config.overlays.prod\")\n \n application = get_wsgi_application()\n", "issue": "\u542f\u52a8\u547d\u4ee4: gunicorn \u652f\u6301\u9000\u51fa\u524d\u6253\u5370\u5806\u6808\n\u9047\u5230gunicorn \u76f4\u63a5\u9000\u51fa\u7684\u60c5\u51b5, \u53ef\u4ee5\u4f7f\u7528 https://stackoverflow.com/questions/57167240/is-it-possible-to-get-a-stack-trace-when-a-gunicorn-worker-hits-a-timeout \u65b9\u5f0f\u8c03\u8bd5, \u6253\u5370\u9000\u51fa\u524d\u5806\u6808, \u4ece\u800c\u63a8\u65ad\u95ee\u9898\u4ee3\u7801\u4f4d\u7f6e\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91PaaS\u5e73\u53f0\u793e\u533a\u7248 (BlueKing PaaS\nCommunity Edition) available.\nCopyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nimport os\n\nfrom dj_static import Cling\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bklogin.config.prod\")\n\napplication = Cling(get_wsgi_application())\n", "path": "src/login/wsgi.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. 
All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_shell.config.overlays.prod\")\n\napplication = get_wsgi_application()\n", "path": "src/saas/wsgi.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_core.config.overlays.prod\")\n\napplication = get_wsgi_application()\n", "path": "src/api/wsgi.py"}]} | 1,370 | 400 |
gh_patches_debug_17087 | rasdani/github-patches | git_diff | ivy-llc__ivy-17675 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
median
</issue>
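The one-word issue above is a request to add a `median` reduction to this Paddle frontend, alongside the existing `mean` below. Whatever the wrapper ends up looking like, it has to reproduce `ivy.median` semantics; a quick sketch of the expected behaviour (example values chosen for illustration):

```python
import ivy

x = ivy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

ivy.median(x)                         # 3.5 - median over the flattened array
ivy.median(x, axis=1)                 # ivy.array([2., 5.])
ivy.median(x, axis=1, keepdims=True)  # ivy.array([[2.], [5.]])
```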
<code>
[start of ivy/functional/frontends/paddle/tensor/stat.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
10 @to_ivy_arrays_and_back
11 def mean(input, axis=None, keepdim=False, out=None):
12 ret = ivy.mean(input, axis=axis, keepdims=keepdim, out=out)
13 ret = ivy.expand_dims(ret, axis=-1) if ret.ndim == 0 else ret
14 return ret
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("complex", "int8")}, "paddle")
18 @to_ivy_arrays_and_back
19 def numel(x, name=None):
20 prod = ivy.prod(x.size, dtype=ivy.int64)
21 try:
22 length = len(x)
23 except (ValueError, TypeError):
24 length = 1 # if 0 dimensional tensor with 1 element
25 return ivy.array([prod if prod > 0 else ivy.array(length, dtype=ivy.int64)])
26
27
28 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
29 @to_ivy_arrays_and_back
30 def nanquantile(a, q, axis=None, keepdims=False, interpolation="linear", out=None):
31 return ivy.nanquantile(
32 a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out
33 )
34
[end of ivy/functional/frontends/paddle/tensor/stat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/stat.py b/ivy/functional/frontends/paddle/tensor/stat.py
--- a/ivy/functional/frontends/paddle/tensor/stat.py
+++ b/ivy/functional/frontends/paddle/tensor/stat.py
@@ -1,6 +1,6 @@
# global
import ivy
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@@ -31,3 +31,17 @@
return ivy.nanquantile(
a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out
)
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float16", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def median(x, axis=None, keepdim=False, name=None):
+ x = (
+ ivy.astype(x, ivy.float64)
+ if ivy.dtype(x) == "float64"
+ else ivy.astype(x, ivy.float32)
+ )
+ return ivy.median(x, axis=axis, keepdims=keepdim)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/stat.py b/ivy/functional/frontends/paddle/tensor/stat.py\n--- a/ivy/functional/frontends/paddle/tensor/stat.py\n+++ b/ivy/functional/frontends/paddle/tensor/stat.py\n@@ -1,6 +1,6 @@\n # global\n import ivy\n-from ivy.func_wrapper import with_unsupported_dtypes\n+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n )\n@@ -31,3 +31,17 @@\n return ivy.nanquantile(\n a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out\n )\n+\n+\n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def median(x, axis=None, keepdim=False, name=None):\n+ x = (\n+ ivy.astype(x, ivy.float64)\n+ if ivy.dtype(x) == \"float64\"\n+ else ivy.astype(x, ivy.float32)\n+ )\n+ return ivy.median(x, axis=axis, keepdims=keepdim)\n", "issue": "median\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef mean(input, axis=None, keepdim=False, out=None):\n ret = ivy.mean(input, axis=axis, keepdims=keepdim, out=out)\n ret = ivy.expand_dims(ret, axis=-1) if ret.ndim == 0 else ret\n return ret\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"complex\", \"int8\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef numel(x, name=None):\n prod = ivy.prod(x.size, dtype=ivy.int64)\n try:\n length = len(x)\n except (ValueError, TypeError):\n length = 1 # if 0 dimensional tensor with 1 element\n return ivy.array([prod if prod > 0 else ivy.array(length, dtype=ivy.int64)])\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef nanquantile(a, q, axis=None, keepdims=False, interpolation=\"linear\", out=None):\n return ivy.nanquantile(\n a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out\n )\n", "path": "ivy/functional/frontends/paddle/tensor/stat.py"}]} | 968 | 322 |
gh_patches_debug_2019 | rasdani/github-patches | git_diff | litestar-org__litestar-1005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: openapi render for multiple tags isn't consistent
**Describe the bug**
When the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.
I'm unsure if I'm doing something crazy, but for a project we check in the generated JSON OpenAPI schema so we can browse the API live in GitLab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent, they are going to keep flip-flopping, as we have a pre-commit hook that regenerates the JSON to make sure it's up to date. I hope that ramble makes sense...
**To Reproduce**
```python
from typing import Dict
from starlite import Starlite, Controller, get
class TestController(Controller):
tags = ["a"]
@get("/", tags=["b"])
def hello_world(self) -> Dict[str, str]:
"""Handler function that returns a greeting dictionary."""
return {"hello": "world"}
app = Starlite(route_handlers=[TestController])
print(app.openapi_schema.paths["/"].get.tags)
```
If you run that multiple times, you will see you get either:
```python
['a', 'b']
```
or
```python
['b', 'a']
```
**Additional context**
I believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?
But as I said, it may not be a problem you care about as I could be doing something silly.
</issue>
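The non-determinism comes from iterating over a Python `set` of strings, whose order depends on per-process hash randomization, so every fresh interpreter can yield a different tag order. Sorting the de-duplicated tags makes the schema stable; a small sketch of the difference (tag names as in the report above):

```python
tags = ["a", "b"]        # collected from the controller layer and the route layer

list(set(tags))          # may be ['a', 'b'] or ['b', 'a'] depending on the run
sorted(set(tags))        # always ['a', 'b'] - deterministic output for the schema
```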
<code>
[start of starlite/openapi/path_item.py]
1 from inspect import cleandoc
2 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
3
4 from pydantic_openapi_schema.v3_1_0.operation import Operation
5 from pydantic_openapi_schema.v3_1_0.path_item import PathItem
6
7 from starlite.openapi.parameters import create_parameter_for_handler
8 from starlite.openapi.request_body import create_request_body
9 from starlite.openapi.responses import create_responses
10 from starlite.utils.helpers import unwrap_partial
11
12 if TYPE_CHECKING:
13 from pydantic import BaseModel
14 from pydantic_openapi_schema.v3_1_0 import SecurityRequirement
15
16 from starlite.handlers import HTTPRouteHandler
17 from starlite.plugins.base import PluginProtocol
18 from starlite.routes import HTTPRoute
19
20
21 def get_description_for_handler(route_handler: "HTTPRouteHandler", use_handler_docstrings: bool) -> Optional[str]:
22 """Produce the operation description for a route handler, either by using the description value if provided,
23
24 or the docstring - if config is enabled.
25
26 Args:
27 route_handler: A route handler instance.
28 use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped
29 handler function.
30
31 Returns:
32 An optional description string
33 """
34 handler_description = route_handler.description
35 if handler_description is None and use_handler_docstrings:
36 fn = unwrap_partial(route_handler.fn.value)
37 return cleandoc(fn.__doc__) if fn.__doc__ else None
38 return handler_description
39
40
41 def extract_layered_values(
42 route_handler: "HTTPRouteHandler",
43 ) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:
44 """Extract the tags and security values from the route handler layers.
45
46 Args:
47 route_handler: A Route Handler instance.
48
49 Returns:
50 A tuple of optional lists.
51 """
52 tags: List[str] = []
53 security: List["SecurityRequirement"] = []
54 for layer in route_handler.ownership_layers:
55 if layer.tags:
56 tags.extend(layer.tags)
57 if layer.security:
58 security.extend(layer.security)
59 return list(set(tags)) if tags else None, security or None
60
61
62 def create_path_item(
63 route: "HTTPRoute", create_examples: bool, plugins: List["PluginProtocol"], use_handler_docstrings: bool
64 ) -> PathItem:
65 """Create a PathItem model for the given route parsing all http_methods into Operation Models."""
66 path_item = PathItem()
67 for http_method, handler_tuple in route.route_handler_map.items():
68 route_handler, _ = handler_tuple
69 if route_handler.include_in_schema:
70 handler_fields = cast("BaseModel", route_handler.signature_model).__fields__
71 parameters = (
72 create_parameter_for_handler(
73 route_handler=route_handler,
74 handler_fields=handler_fields,
75 path_parameters=route.path_parameters,
76 generate_examples=create_examples,
77 )
78 or None
79 )
80 raises_validation_error = bool("data" in handler_fields or path_item.parameters or parameters)
81 handler_name = unwrap_partial(route_handler.handler_name).replace("_", " ").title()
82 request_body = None
83 if "data" in handler_fields:
84 request_body = create_request_body(
85 field=handler_fields["data"], generate_examples=create_examples, plugins=plugins
86 )
87
88 tags, security = extract_layered_values(route_handler)
89 operation = Operation(
90 operationId=route_handler.operation_id or handler_name,
91 tags=tags,
92 summary=route_handler.summary,
93 description=get_description_for_handler(route_handler, use_handler_docstrings),
94 deprecated=route_handler.deprecated,
95 responses=create_responses(
96 route_handler=route_handler,
97 raises_validation_error=raises_validation_error,
98 generate_examples=create_examples,
99 plugins=plugins,
100 ),
101 requestBody=request_body,
102 parameters=parameters, # type: ignore[arg-type]
103 security=security,
104 )
105 setattr(path_item, http_method.lower(), operation)
106 return path_item
107
[end of starlite/openapi/path_item.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py
--- a/starlite/openapi/path_item.py
+++ b/starlite/openapi/path_item.py
@@ -56,7 +56,7 @@
tags.extend(layer.tags)
if layer.security:
security.extend(layer.security)
- return list(set(tags)) if tags else None, security or None
+ return sorted(set(tags)) if tags else None, security or None
def create_path_item(
| {"golden_diff": "diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py\n--- a/starlite/openapi/path_item.py\n+++ b/starlite/openapi/path_item.py\n@@ -56,7 +56,7 @@\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n- return list(set(tags)) if tags else None, security or None\n+ return sorted(set(tags)) if tags else None, security or None\n \n \n def create_path_item(\n", "issue": "Bug: openapi render for multiple tags isn't consistent\n**Describe the bug**\r\nWhen the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.\r\n\r\nI'm unsure if I'm doing something crazy but for a project, we check in the generated json openapi schema so we can browse the API live in gitlab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent they are going to keep flip flopping as we have a pre-commit that generates the json to make sure it's up to date. I hope that ramble makes sense...\r\n\r\n**To Reproduce**\r\n```python\r\nfrom typing import Dict \r\n \r\nfrom starlite import Starlite, Controller, get \r\n \r\nclass TestController(Controller): \r\n tags = [\"a\"] \r\n \r\n @get(\"/\", tags=[\"b\"]) \r\n def hello_world(self) -> Dict[str, str]: \r\n \"\"\"Handler function that returns a greeting dictionary.\"\"\"\r\n return {\"hello\": \"world\"} \r\n \r\n \r\napp = Starlite(route_handlers=[TestController]) \r\nprint(app.openapi_schema.paths[\"/\"].get.tags) \r\n```\r\nIf you run that multiple times, you will see you get either:\r\n```python\r\n['a', 'b']\r\n```\r\nor\r\n```python\r\n['b', 'a']\r\n```\r\n\r\n**Additional context**\r\nI believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. 
Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?\r\n\r\nBut as I said, it may not be a problem you care about as I could be doing something silly.\r\n\n", "before_files": [{"content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return list(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n 
operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py"}]} | 2,041 | 108 |
gh_patches_debug_3584 | rasdani/github-patches | git_diff | vas3k__vas3k.club-220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only the part of the id before the hyphen is highlighted when you are @-mentioned

https://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5
</issue>
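The mention pattern in `common/regexp.py` below only accepts `[A-Za-z0-9_]` inside a username, so matching stops at the first hyphen and only the leading part of a hyphenated user id gets linked. A quick demonstration with a hypothetical handle:

```python
import re

OLD = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")    # current pattern
NEW = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")   # '-' added to the character class

text = "hello @ivan-petrov"
OLD.findall(text)   # ['ivan']        - the mention is cut at the hyphen
NEW.findall(text)   # ['ivan-petrov'] - the full user id is captured
```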
<code>
[start of common/regexp.py]
1 import re
2
3 USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
4 IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
5 VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
6 YOUTUBE_RE = re.compile(
7 r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
8 )
9 TWITTER_RE = re.compile(r"(https?:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/[\d]+)")
10 FAVICON_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png|ico)")
11
[end of common/regexp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/common/regexp.py b/common/regexp.py
--- a/common/regexp.py
+++ b/common/regexp.py
@@ -1,6 +1,6 @@
import re
-USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
+USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")
IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
YOUTUBE_RE = re.compile(
| {"golden_diff": "diff --git a/common/regexp.py b/common/regexp.py\n--- a/common/regexp.py\n+++ b/common/regexp.py\n@@ -1,6 +1,6 @@\n import re\n \n-USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\n+USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\n IMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\n VIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\n YOUTUBE_RE = re.compile(\n", "issue": "\u0422\u043e\u043b\u044c\u043a\u043e \u0447\u0430\u0441\u0442\u044c id \u0434\u043e \u0434\u0435\u0444\u0438\u0441\u0430 \u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0430 \u043a\u043e\u0433\u0434\u0430 \u0442\u0435\u0431\u044f @\u0442\u044d\u0433\u043d\u0443\u043b\u0438\n\r\nhttps://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5\r\n\r\n\r\n\n", "before_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}]} | 883 | 165 |
gh_patches_debug_10581 | rasdani/github-patches | git_diff | pytorch__rl-598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] timeit profiling class does not correctly track how many times a function has been called.
## Describe the bug
In file **pytorch/rl/torchrl/_utils.py**, in the class **timeit**, starting at line 31, the code is the following
```
count = self._REG[self.name][1]
self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
self._REG[self.name][1] = self._REG[self.name][1] + t
self._REG[self.name][2] = count + 1
```
I understand self._REG[self.name][2] tracks the number of times a function has been called.
In that case, shouldn't the first line be changed to
```
count = self._REG[self.name][2]
```
## Checklist
- [X] I have checked that there is no similar issue in the repo (**required**)
- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)
- [ ] I have provided a minimal working example to reproduce the bug (**required**)
</issue>
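Tracing the update by hand shows how reading `count` from index 1 (the accumulated time) corrupts both the running average and the call counter once the two values diverge - for example, two calls taking 2.0 s and 4.0 s:

```python
REG = [0.0, 0.0, 0]            # [running average, total time, call count]

for t in (2.0, 4.0):
    count = REG[1]             # bug: this is the accumulated time, not the call count
    REG[0] = (REG[0] * count + t) / (count + 1)
    REG[1] = REG[1] + t
    REG[2] = count + 1

print(REG)   # [2.666..., 6.0, 3.0] - but the true average is 3.0 and only 2 calls were made
```

With `count = self._REG[self.name][2]` the same trace ends at `[3.0, 6.0, 2]`, as expected.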
<code>
[start of torchrl/_utils.py]
1 import collections
2 import math
3 import time
4
5 import numpy as np
6
7
8 class timeit:
9 """A dirty but easy to use decorator for profiling code."""
10
11 _REG = {}
12
13 def __init__(self, name):
14 self.name = name
15
16 def __call__(self, fn):
17 def decorated_fn(*args, **kwargs):
18 with self:
19 out = fn(*args, **kwargs)
20 return out
21
22 return decorated_fn
23
24 def __enter__(self):
25 self.t0 = time.time()
26
27 def __exit__(self, exc_type, exc_val, exc_tb):
28 t = time.time() - self.t0
29 self._REG.setdefault(self.name, [0.0, 0.0, 0])
30
31 count = self._REG[self.name][1]
32 self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
33 self._REG[self.name][1] = self._REG[self.name][1] + t
34 self._REG[self.name][2] = count + 1
35
36 @staticmethod
37 def print(prefix=None):
38 keys = list(timeit._REG)
39 keys.sort()
40 for name in keys:
41 strings = []
42 if prefix:
43 strings.append(prefix)
44 strings.append(
45 f"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)"
46 )
47 print(" -- ".join(strings))
48
49 @staticmethod
50 def erase():
51 for k in timeit._REG:
52 timeit._REG[k] = [0.0, 0.0, 0]
53
54
55 def _check_for_faulty_process(processes):
56 terminate = False
57 for p in processes:
58 if not p.is_alive():
59 terminate = True
60 for _p in processes:
61 if _p.is_alive():
62 _p.terminate()
63 if terminate:
64 break
65 if terminate:
66 raise RuntimeError(
67 "At least one process failed. Check for more infos in the log."
68 )
69
70
71 def seed_generator(seed):
72 """A seed generator function.
73
74 Given a seeding integer, generates a deterministic next seed to be used in a
75 seeding sequence.
76
77 Args:
78 seed (int): initial seed.
79
80 Returns: Next seed of the chain.
81
82 """
83 max_seed_val = (
84 2 ** 32 - 1
85 ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688
86 rng = np.random.default_rng(seed)
87 seed = int.from_bytes(rng.bytes(8), "big")
88 return seed % max_seed_val
89
90
91 class KeyDependentDefaultDict(collections.defaultdict):
92 """A key-dependent default dict.
93
94 Examples:
95 >>> my_dict = KeyDependentDefaultDict(lambda key: "foo_" + key)
96 >>> print(my_dict["bar"])
97 foo_bar
98 """
99
100 def __init__(self, fun):
101 self.fun = fun
102 super().__init__()
103
104 def __missing__(self, key):
105 value = self.fun(key)
106 self[key] = value
107 return value
108
109
110 def prod(sequence):
111 """General prod function, that generalised usage across math and np.
112
113 Created for multiple python versions compatibility).
114
115 """
116 if hasattr(math, "prod"):
117 return math.prod(sequence)
118 else:
119 return int(np.prod(sequence))
120
[end of torchrl/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchrl/_utils.py b/torchrl/_utils.py
--- a/torchrl/_utils.py
+++ b/torchrl/_utils.py
@@ -26,12 +26,13 @@
def __exit__(self, exc_type, exc_val, exc_tb):
t = time.time() - self.t0
- self._REG.setdefault(self.name, [0.0, 0.0, 0])
+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])
- count = self._REG[self.name][1]
- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
- self._REG[self.name][1] = self._REG[self.name][1] + t
- self._REG[self.name][2] = count + 1
+ count = val[2]
+ N = count + 1
+ val[0] = val[0] * (count / N) + t / N
+ val[1] += t
+ val[2] = N
@staticmethod
def print(prefix=None):
| {"golden_diff": "diff --git a/torchrl/_utils.py b/torchrl/_utils.py\n--- a/torchrl/_utils.py\n+++ b/torchrl/_utils.py\n@@ -26,12 +26,13 @@\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n- self._REG.setdefault(self.name, [0.0, 0.0, 0])\n+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])\n \n- count = self._REG[self.name][1]\n- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n- self._REG[self.name][1] = self._REG[self.name][1] + t\n- self._REG[self.name][2] = count + 1\n+ count = val[2]\n+ N = count + 1\n+ val[0] = val[0] * (count / N) + t / N\n+ val[1] += t\n+ val[2] = N\n \n @staticmethod\n def print(prefix=None):\n", "issue": "[BUG] timeit profiling class does not correctly track how many times a function has been called.\n## Describe the bug\r\n\r\nIn file **pytorch/rl/torchrl/_utils.py**, in the class **timeit**, starting at line 31, the code is the following\r\n\r\n```\r\ncount = self._REG[self.name][1]\r\nself._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\r\nself._REG[self.name][1] = self._REG[self.name][1] + t\r\nself._REG[self.name][2] = count + 1\r\n```\r\n\r\nI understand self._REG[self.name][2] tracks the number of times a function has been called.\r\nIn that case, shouldn't the first line be changed to \r\n\r\n```\r\ncount = self._REG[self.name][2]\r\n```\r\n\r\n## Checklist\r\n\r\n- [X] I have checked that there is no similar issue in the repo (**required**)\r\n- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)\r\n- [ ] I have provided a minimal working example to reproduce the bug (**required**)\r\n\n", "before_files": [{"content": "import collections\nimport math\nimport time\n\nimport numpy as np\n\n\nclass timeit:\n \"\"\"A dirty but easy to use decorator for profiling code.\"\"\"\n\n _REG = {}\n\n def __init__(self, name):\n self.name = name\n\n def __call__(self, fn):\n def decorated_fn(*args, **kwargs):\n with self:\n out = fn(*args, **kwargs)\n return out\n\n return decorated_fn\n\n def __enter__(self):\n self.t0 = time.time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n self._REG.setdefault(self.name, [0.0, 0.0, 0])\n\n count = self._REG[self.name][1]\n self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n self._REG[self.name][1] = self._REG[self.name][1] + t\n self._REG[self.name][2] = count + 1\n\n @staticmethod\n def print(prefix=None):\n keys = list(timeit._REG)\n keys.sort()\n for name in keys:\n strings = []\n if prefix:\n strings.append(prefix)\n strings.append(\n f\"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)\"\n )\n print(\" -- \".join(strings))\n\n @staticmethod\n def erase():\n for k in timeit._REG:\n timeit._REG[k] = [0.0, 0.0, 0]\n\n\ndef _check_for_faulty_process(processes):\n terminate = False\n for p in processes:\n if not p.is_alive():\n terminate = True\n for _p in processes:\n if _p.is_alive():\n _p.terminate()\n if terminate:\n break\n if terminate:\n raise RuntimeError(\n \"At least one process failed. 
Check for more infos in the log.\"\n )\n\n\ndef seed_generator(seed):\n \"\"\"A seed generator function.\n\n Given a seeding integer, generates a deterministic next seed to be used in a\n seeding sequence.\n\n Args:\n seed (int): initial seed.\n\n Returns: Next seed of the chain.\n\n \"\"\"\n max_seed_val = (\n 2 ** 32 - 1\n ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688\n rng = np.random.default_rng(seed)\n seed = int.from_bytes(rng.bytes(8), \"big\")\n return seed % max_seed_val\n\n\nclass KeyDependentDefaultDict(collections.defaultdict):\n \"\"\"A key-dependent default dict.\n\n Examples:\n >>> my_dict = KeyDependentDefaultDict(lambda key: \"foo_\" + key)\n >>> print(my_dict[\"bar\"])\n foo_bar\n \"\"\"\n\n def __init__(self, fun):\n self.fun = fun\n super().__init__()\n\n def __missing__(self, key):\n value = self.fun(key)\n self[key] = value\n return value\n\n\ndef prod(sequence):\n \"\"\"General prod function, that generalised usage across math and np.\n\n Created for multiple python versions compatibility).\n\n \"\"\"\n if hasattr(math, \"prod\"):\n return math.prod(sequence)\n else:\n return int(np.prod(sequence))\n", "path": "torchrl/_utils.py"}]} | 1,815 | 275 |
gh_patches_debug_39579 | rasdani/github-patches | git_diff | vyperlang__vyper-2071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infinite loop from multidimensional array in calldata
### Version Information
* vyper Version (output of `vyper --version`): latest master
* OS: linux
* Python Version: `3.8.2`
### What's your issue about?
Using a multidimensional array in the function inputs, where the length of the >=2nd dimension is 6 or more, causes an infinite loop when calling the function.
For example, each of the following methods will compile but attempting to call them fails with out of gas:
```python
@public
def foo(a: uint256[1][6]):
pass
@public
def bar(a: uint256[1][1][6]):
pass
```
For comparison, these methods execute as expected:
```python
@public
def foo(a: uint256[6][1]):
pass
@public
def bar(a: uint256[100][5][5][5]):
pass
```
#### Some observations:
* The length of the first array element has no effect.
* The data type has no effect.
* The location of the array within calldata, and total number of arguments, has no effect.
* The number of dimensions, or dimension that exceeds a length of 5, has no effect.
### How can it be fixed?
Unsure at this time.
</issue>
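One mechanism that is consistent with all of the observations above and with the code below: when the outer dimension has more than five elements, `make_arg_clamper` emits a loop, and the recursive call for the element type then receives list-valued `datapos`/`mempos`, so the nested dimension is emitted as a loop as well - and both loops use the same scratch word (memory offset 288) as their counter. The inner loop re-initialises that word on every pass, so the outer loop's counter can never reach `mem_to`. A rough Python model of the generated control flow for `uint256[1][6]` (an interpretation of the emitted LLL, not the compiler's actual output):

```python
mem = {288: 0}          # the single scratch slot both loops use as a counter
outer_to = 32 * 5       # subtype_size * 32 * (count - 1) for uint256[1][6]
steps = 0

while True:
    # nested clamper for the uint256[1] element: re-initialises the shared
    # slot, runs one (empty) iteration, and leaves the slot at 32
    mem[288] = 0
    mem[288] += 32
    # outer loop: increment the counter and test the exit condition
    mem[288] += 32
    steps += 1
    if not mem[288] < outer_to:   # 64 < 160 holds every time, so this never fires
        break
    if steps > 5:                 # safety valve so the illustration terminates
        print("counter stuck at", mem[288], "- the real loop never exits")
        break
```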
<code>
[start of vyper/parser/arg_clamps.py]
1 import functools
2 import uuid
3
4 from vyper.parser.lll_node import LLLnode
5 from vyper.types.types import (
6 ByteArrayLike,
7 ListType,
8 get_size_of_type,
9 is_base_type,
10 )
11 from vyper.utils import MemoryPositions
12
13
14 def _mk_calldatacopy_copier(pos, sz, mempos):
15 return ["calldatacopy", mempos, ["add", 4, pos], sz]
16
17
18 def _mk_codecopy_copier(pos, sz, mempos):
19 return ["codecopy", mempos, ["add", "~codelen", pos], sz]
20
21
22 def make_arg_clamper(datapos, mempos, typ, is_init=False):
23 """
24 Clamps argument to type limits.
25 """
26
27 if not is_init:
28 data_decl = ["calldataload", ["add", 4, datapos]]
29 copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)
30 else:
31 data_decl = ["codeload", ["add", "~codelen", datapos]]
32 copier = functools.partial(_mk_codecopy_copier, mempos=mempos)
33 # Numbers: make sure they're in range
34 if is_base_type(typ, "int128"):
35 return LLLnode.from_list(
36 [
37 "clamp",
38 ["mload", MemoryPositions.MINNUM],
39 data_decl,
40 ["mload", MemoryPositions.MAXNUM],
41 ],
42 typ=typ,
43 annotation="checking int128 input",
44 )
45 # Booleans: make sure they're zero or one
46 elif is_base_type(typ, "bool"):
47 return LLLnode.from_list(
48 ["uclamplt", data_decl, 2], typ=typ, annotation="checking bool input",
49 )
50 # Addresses: make sure they're in range
51 elif is_base_type(typ, "address"):
52 return LLLnode.from_list(
53 ["uclamplt", data_decl, ["mload", MemoryPositions.ADDRSIZE]],
54 typ=typ,
55 annotation="checking address input",
56 )
57 # Bytes: make sure they have the right size
58 elif isinstance(typ, ByteArrayLike):
59 return LLLnode.from_list(
60 [
61 "seq",
62 copier(data_decl, 32 + typ.maxlen),
63 ["assert", ["le", ["calldataload", ["add", 4, data_decl]], typ.maxlen]],
64 ],
65 typ=None,
66 annotation="checking bytearray input",
67 )
68 # Lists: recurse
69 elif isinstance(typ, ListType):
70 if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
71 subtype_size = get_size_of_type(typ.subtype)
72 i_incr = subtype_size * 32
73
74 mem_to = subtype_size * 32 * (typ.count - 1)
75 loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
76
77 offset = 288
78 o = [
79 ["mstore", offset, 0], # init loop
80 ["label", loop_label],
81 make_arg_clamper(
82 ["add", datapos, ["mload", offset]],
83 ["add", mempos, ["mload", offset]],
84 typ.subtype,
85 is_init,
86 ),
87 ["mstore", offset, ["add", ["mload", offset], i_incr]],
88 ["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
89 ]
90 else:
91 o = []
92 for i in range(typ.count):
93 offset = get_size_of_type(typ.subtype) * 32 * i
94 o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
95 return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
96 # Otherwise don't make any checks
97 else:
98 return LLLnode.from_list("pass")
99
[end of vyper/parser/arg_clamps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py
--- a/vyper/parser/arg_clamps.py
+++ b/vyper/parser/arg_clamps.py
@@ -22,6 +22,22 @@
def make_arg_clamper(datapos, mempos, typ, is_init=False):
"""
Clamps argument to type limits.
+
+ Arguments
+ ---------
+ datapos : int | LLLnode
+ Calldata offset of the value being clamped
+ mempos : int | LLLnode
+ Memory offset that the value is stored at during clamping
+ typ : vyper.types.types.BaseType
+ Type of the value
+ is_init : bool, optional
+ Boolean indicating if we are generating init bytecode
+
+ Returns
+ -------
+ LLLnode
+ Arg clamper LLL
"""
if not is_init:
@@ -68,31 +84,45 @@
# Lists: recurse
elif isinstance(typ, ListType):
if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
- subtype_size = get_size_of_type(typ.subtype)
- i_incr = subtype_size * 32
+ # find ultimate base type
+ subtype = typ.subtype
+ while hasattr(subtype, "subtype"):
+ subtype = subtype.subtype
+
+ # make arg clamper for the base type
+ offset = MemoryPositions.FREE_LOOP_INDEX
+ clamper = make_arg_clamper(
+ ["add", datapos, ["mload", offset]],
+ ["add", mempos, ["mload", offset]],
+ subtype,
+ is_init,
+ )
+ if clamper.value == "pass":
+ # no point looping if the base type doesn't require clamping
+ return clamper
+
+ # loop the entire array at once, even if it's multidimensional
+ type_size = get_size_of_type(typ)
+ i_incr = get_size_of_type(subtype) * 32
- mem_to = subtype_size * 32 * (typ.count - 1)
+ mem_to = type_size * 32
loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
- offset = 288
- o = [
+ lll_node = [
["mstore", offset, 0], # init loop
["label", loop_label],
- make_arg_clamper(
- ["add", datapos, ["mload", offset]],
- ["add", mempos, ["mload", offset]],
- typ.subtype,
- is_init,
- ),
+ clamper,
["mstore", offset, ["add", ["mload", offset], i_incr]],
["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
]
else:
- o = []
+ lll_node = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
- return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
+ lll_node.append(
+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)
+ )
+ return LLLnode.from_list(["seq"] + lll_node, typ=None, annotation="checking list input")
# Otherwise don't make any checks
else:
return LLLnode.from_list("pass")
| {"golden_diff": "diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py\n--- a/vyper/parser/arg_clamps.py\n+++ b/vyper/parser/arg_clamps.py\n@@ -22,6 +22,22 @@\n def make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n+\n+ Arguments\n+ ---------\n+ datapos : int | LLLnode\n+ Calldata offset of the value being clamped\n+ mempos : int | LLLnode\n+ Memory offset that the value is stored at during clamping\n+ typ : vyper.types.types.BaseType\n+ Type of the value\n+ is_init : bool, optional\n+ Boolean indicating if we are generating init bytecode\n+\n+ Returns\n+ -------\n+ LLLnode\n+ Arg clamper LLL\n \"\"\"\n \n if not is_init:\n@@ -68,31 +84,45 @@\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n- subtype_size = get_size_of_type(typ.subtype)\n- i_incr = subtype_size * 32\n+ # find ultimate base type\n+ subtype = typ.subtype\n+ while hasattr(subtype, \"subtype\"):\n+ subtype = subtype.subtype\n+\n+ # make arg clamper for the base type\n+ offset = MemoryPositions.FREE_LOOP_INDEX\n+ clamper = make_arg_clamper(\n+ [\"add\", datapos, [\"mload\", offset]],\n+ [\"add\", mempos, [\"mload\", offset]],\n+ subtype,\n+ is_init,\n+ )\n+ if clamper.value == \"pass\":\n+ # no point looping if the base type doesn't require clamping\n+ return clamper\n+\n+ # loop the entire array at once, even if it's multidimensional\n+ type_size = get_size_of_type(typ)\n+ i_incr = get_size_of_type(subtype) * 32\n \n- mem_to = subtype_size * 32 * (typ.count - 1)\n+ mem_to = type_size * 32\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n \n- offset = 288\n- o = [\n+ lll_node = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n- make_arg_clamper(\n- [\"add\", datapos, [\"mload\", offset]],\n- [\"add\", mempos, [\"mload\", offset]],\n- typ.subtype,\n- is_init,\n- ),\n+ clamper,\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n- o = []\n+ lll_node = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n- return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n+ lll_node.append(\n+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)\n+ )\n+ return LLLnode.from_list([\"seq\"] + lll_node, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "issue": "Infinite loop from multidimensional array in calldata\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master\r\n* OS: linux\r\n* Python Version: `3.8.2`\r\n\r\n### What's your issue about?\r\n\r\nUsing a multidimensional array in the function inputs, where the length of the >=2nd dimension is 6 or more, causes an infinite loop when calling the function.\r\n\r\nFor example, each of the following methods will compile but attempting to call them fails with out of gas:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[1][6]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[1][1][6]):\r\n pass\r\n```\r\n\r\nFor comparison, these methods execute as expected:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[6][1]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[100][5][5][5]):\r\n pass\r\n```\r\n\r\n#### Some observations:\r\n\r\n* The length of 
the first array element has no effect.\r\n* The data type has no effect.\r\n* The location of the array within calldata, and total number of arguments, has no effect.\r\n* The number of dimensions, or dimension that exceeds a length of 5, has no effect.\r\n\r\n### How can it be fixed?\r\nUnsure at this time.\n", "before_files": [{"content": "import functools\nimport uuid\n\nfrom vyper.parser.lll_node import LLLnode\nfrom vyper.types.types import (\n ByteArrayLike,\n ListType,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.utils import MemoryPositions\n\n\ndef _mk_calldatacopy_copier(pos, sz, mempos):\n return [\"calldatacopy\", mempos, [\"add\", 4, pos], sz]\n\n\ndef _mk_codecopy_copier(pos, sz, mempos):\n return [\"codecopy\", mempos, [\"add\", \"~codelen\", pos], sz]\n\n\ndef make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n \"\"\"\n\n if not is_init:\n data_decl = [\"calldataload\", [\"add\", 4, datapos]]\n copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)\n else:\n data_decl = [\"codeload\", [\"add\", \"~codelen\", datapos]]\n copier = functools.partial(_mk_codecopy_copier, mempos=mempos)\n # Numbers: make sure they're in range\n if is_base_type(typ, \"int128\"):\n return LLLnode.from_list(\n [\n \"clamp\",\n [\"mload\", MemoryPositions.MINNUM],\n data_decl,\n [\"mload\", MemoryPositions.MAXNUM],\n ],\n typ=typ,\n annotation=\"checking int128 input\",\n )\n # Booleans: make sure they're zero or one\n elif is_base_type(typ, \"bool\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, 2], typ=typ, annotation=\"checking bool input\",\n )\n # Addresses: make sure they're in range\n elif is_base_type(typ, \"address\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, [\"mload\", MemoryPositions.ADDRSIZE]],\n typ=typ,\n annotation=\"checking address input\",\n )\n # Bytes: make sure they have the right size\n elif isinstance(typ, ByteArrayLike):\n return LLLnode.from_list(\n [\n \"seq\",\n copier(data_decl, 32 + typ.maxlen),\n [\"assert\", [\"le\", [\"calldataload\", [\"add\", 4, data_decl]], typ.maxlen]],\n ],\n typ=None,\n annotation=\"checking bytearray input\",\n )\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n subtype_size = get_size_of_type(typ.subtype)\n i_incr = subtype_size * 32\n\n mem_to = subtype_size * 32 * (typ.count - 1)\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n\n offset = 288\n o = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n make_arg_clamper(\n [\"add\", datapos, [\"mload\", offset]],\n [\"add\", mempos, [\"mload\", offset]],\n typ.subtype,\n is_init,\n ),\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n o = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "path": "vyper/parser/arg_clamps.py"}]} | 1,885 | 838 |
gh_patches_debug_1907 | rasdani/github-patches | git_diff | google__flax-628 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'
After updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now deprecated flax.nn package if backward-compatibility might be the reason for this issue.
The Issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.
</issue>
<code>
[start of setup.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding='utf-8').read()
24 except IOError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.1.59",
30 "matplotlib", # only needed for tensorboard export
31 "dataclasses;python_version<'3.7'", # will only install on py3.6
32 "msgpack",
33 ]
34
35 tests_require = [
36 "atari-py",
37 "clu", # All examples.
38 "gym",
39 "jaxlib",
40 "ml-collections",
41 "opencv-python",
42 "pytest",
43 "pytest-cov",
44 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
45 "sentencepiece", # WMT example.
46 "svn",
47 "tensorflow",
48 "tensorflow_text", # WMT example.
49 "tensorflow_datasets",
50 ]
51
52 __version__ = None
53
54 with open('flax/version.py') as f:
55 exec(f.read(), globals())
56
57 setup(
58 name="flax",
59 version=__version__,
60 description="Flax: A neural network library for JAX designed for flexibility",
61 long_description="\n\n".join([README]),
62 long_description_content_type='text/markdown',
63 classifiers=[
64 "Development Status :: 3 - Alpha",
65 "Intended Audience :: Developers",
66 "Intended Audience :: Science/Research",
67 "License :: OSI Approved :: Apache Software License",
68 "Programming Language :: Python :: 3.7",
69 "Topic :: Scientific/Engineering :: Artificial Intelligence",
70 ],
71 keywords="",
72 author="Flax team",
73 author_email="[email protected]",
74 url="https://github.com/google/flax",
75 packages=find_packages(),
76 include_package_data=False,
77 zip_safe=False,
78 install_requires=install_requires,
79 extras_require={
80 "testing": tests_require,
81 },
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
install_requires = [
"numpy>=1.12",
- "jax>=0.1.59",
+ "jax>=0.1.77",
"matplotlib", # only needed for tensorboard export
"dataclasses;python_version<'3.7'", # will only install on py3.6
"msgpack",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \n install_requires = [\n \"numpy>=1.12\",\n- \"jax>=0.1.59\",\n+ \"jax>=0.1.77\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n", "issue": "After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'\nAfter updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now deprecated flax.nn package if backward-compatibility might be the reason for this issue.\r\nThe Issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"atari-py\",\n \"clu\", # All examples.\n \"gym\",\n \"jaxlib\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow\",\n \"tensorflow_text\", # WMT example.\n \"tensorflow_datasets\",\n]\n\n__version__ = None\n\nwith open('flax/version.py') as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]} | 1,403 | 111 |
gh_patches_debug_5356 | rasdani/github-patches | git_diff | getsentry__sentry-python-1093 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
threading.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10
Ref : https://github.com/python/cpython/pull/25174
https://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69
</issue>
<code>
[start of sentry_sdk/worker.py]
1 import os
2 import threading
3
4 from time import sleep, time
5 from sentry_sdk._compat import check_thread_support
6 from sentry_sdk._queue import Queue, Full
7 from sentry_sdk.utils import logger
8 from sentry_sdk.consts import DEFAULT_QUEUE_SIZE
9
10 from sentry_sdk._types import MYPY
11
12 if MYPY:
13 from typing import Any
14 from typing import Optional
15 from typing import Callable
16
17
18 _TERMINATOR = object()
19
20
21 class BackgroundWorker(object):
22 def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
23 # type: (int) -> None
24 check_thread_support()
25 self._queue = Queue(queue_size) # type: Queue
26 self._lock = threading.Lock()
27 self._thread = None # type: Optional[threading.Thread]
28 self._thread_for_pid = None # type: Optional[int]
29
30 @property
31 def is_alive(self):
32 # type: () -> bool
33 if self._thread_for_pid != os.getpid():
34 return False
35 if not self._thread:
36 return False
37 return self._thread.is_alive()
38
39 def _ensure_thread(self):
40 # type: () -> None
41 if not self.is_alive:
42 self.start()
43
44 def _timed_queue_join(self, timeout):
45 # type: (float) -> bool
46 deadline = time() + timeout
47 queue = self._queue
48
49 queue.all_tasks_done.acquire()
50
51 try:
52 while queue.unfinished_tasks:
53 delay = deadline - time()
54 if delay <= 0:
55 return False
56 queue.all_tasks_done.wait(timeout=delay)
57
58 return True
59 finally:
60 queue.all_tasks_done.release()
61
62 def start(self):
63 # type: () -> None
64 with self._lock:
65 if not self.is_alive:
66 self._thread = threading.Thread(
67 target=self._target, name="raven-sentry.BackgroundWorker"
68 )
69 self._thread.setDaemon(True)
70 self._thread.start()
71 self._thread_for_pid = os.getpid()
72
73 def kill(self):
74 # type: () -> None
75 """
76 Kill worker thread. Returns immediately. Not useful for
77 waiting on shutdown for events, use `flush` for that.
78 """
79 logger.debug("background worker got kill request")
80 with self._lock:
81 if self._thread:
82 try:
83 self._queue.put_nowait(_TERMINATOR)
84 except Full:
85 logger.debug("background worker queue full, kill failed")
86
87 self._thread = None
88 self._thread_for_pid = None
89
90 def flush(self, timeout, callback=None):
91 # type: (float, Optional[Any]) -> None
92 logger.debug("background worker got flush request")
93 with self._lock:
94 if self.is_alive and timeout > 0.0:
95 self._wait_flush(timeout, callback)
96 logger.debug("background worker flushed")
97
98 def _wait_flush(self, timeout, callback):
99 # type: (float, Optional[Any]) -> None
100 initial_timeout = min(0.1, timeout)
101 if not self._timed_queue_join(initial_timeout):
102 pending = self._queue.qsize() + 1
103 logger.debug("%d event(s) pending on flush", pending)
104 if callback is not None:
105 callback(pending, timeout)
106
107 if not self._timed_queue_join(timeout - initial_timeout):
108 pending = self._queue.qsize() + 1
109 logger.error("flush timed out, dropped %s events", pending)
110
111 def submit(self, callback):
112 # type: (Callable[[], None]) -> bool
113 self._ensure_thread()
114 try:
115 self._queue.put_nowait(callback)
116 return True
117 except Full:
118 return False
119
120 def _target(self):
121 # type: () -> None
122 while True:
123 callback = self._queue.get()
124 try:
125 if callback is _TERMINATOR:
126 break
127 try:
128 callback()
129 except Exception:
130 logger.error("Failed processing job", exc_info=True)
131 finally:
132 self._queue.task_done()
133 sleep(0)
134
[end of sentry_sdk/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py
--- a/sentry_sdk/worker.py
+++ b/sentry_sdk/worker.py
@@ -66,7 +66,7 @@
self._thread = threading.Thread(
target=self._target, name="raven-sentry.BackgroundWorker"
)
- self._thread.setDaemon(True)
+ self._thread.daemon = True
self._thread.start()
self._thread_for_pid = os.getpid()
| {"golden_diff": "diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py\n--- a/sentry_sdk/worker.py\n+++ b/sentry_sdk/worker.py\n@@ -66,7 +66,7 @@\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n- self._thread.setDaemon(True)\n+ self._thread.daemon = True\n self._thread.start()\n self._thread_for_pid = os.getpid()\n", "issue": "threading.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10\nRef : https://github.com/python/cpython/pull/25174\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69\n", "before_files": [{"content": "import os\nimport threading\n\nfrom time import sleep, time\nfrom sentry_sdk._compat import check_thread_support\nfrom sentry_sdk._queue import Queue, Full\nfrom sentry_sdk.utils import logger\nfrom sentry_sdk.consts import DEFAULT_QUEUE_SIZE\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n\n_TERMINATOR = object()\n\n\nclass BackgroundWorker(object):\n def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):\n # type: (int) -> None\n check_thread_support()\n self._queue = Queue(queue_size) # type: Queue\n self._lock = threading.Lock()\n self._thread = None # type: Optional[threading.Thread]\n self._thread_for_pid = None # type: Optional[int]\n\n @property\n def is_alive(self):\n # type: () -> bool\n if self._thread_for_pid != os.getpid():\n return False\n if not self._thread:\n return False\n return self._thread.is_alive()\n\n def _ensure_thread(self):\n # type: () -> None\n if not self.is_alive:\n self.start()\n\n def _timed_queue_join(self, timeout):\n # type: (float) -> bool\n deadline = time() + timeout\n queue = self._queue\n\n queue.all_tasks_done.acquire()\n\n try:\n while queue.unfinished_tasks:\n delay = deadline - time()\n if delay <= 0:\n return False\n queue.all_tasks_done.wait(timeout=delay)\n\n return True\n finally:\n queue.all_tasks_done.release()\n\n def start(self):\n # type: () -> None\n with self._lock:\n if not self.is_alive:\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n self._thread.setDaemon(True)\n self._thread.start()\n self._thread_for_pid = os.getpid()\n\n def kill(self):\n # type: () -> None\n \"\"\"\n Kill worker thread. Returns immediately. 
Not useful for\n waiting on shutdown for events, use `flush` for that.\n \"\"\"\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n try:\n self._queue.put_nowait(_TERMINATOR)\n except Full:\n logger.debug(\"background worker queue full, kill failed\")\n\n self._thread = None\n self._thread_for_pid = None\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"background worker got flush request\")\n with self._lock:\n if self.is_alive and timeout > 0.0:\n self._wait_flush(timeout, callback)\n logger.debug(\"background worker flushed\")\n\n def _wait_flush(self, timeout, callback):\n # type: (float, Optional[Any]) -> None\n initial_timeout = min(0.1, timeout)\n if not self._timed_queue_join(initial_timeout):\n pending = self._queue.qsize() + 1\n logger.debug(\"%d event(s) pending on flush\", pending)\n if callback is not None:\n callback(pending, timeout)\n\n if not self._timed_queue_join(timeout - initial_timeout):\n pending = self._queue.qsize() + 1\n logger.error(\"flush timed out, dropped %s events\", pending)\n\n def submit(self, callback):\n # type: (Callable[[], None]) -> bool\n self._ensure_thread()\n try:\n self._queue.put_nowait(callback)\n return True\n except Full:\n return False\n\n def _target(self):\n # type: () -> None\n while True:\n callback = self._queue.get()\n try:\n if callback is _TERMINATOR:\n break\n try:\n callback()\n except Exception:\n logger.error(\"Failed processing job\", exc_info=True)\n finally:\n self._queue.task_done()\n sleep(0)\n", "path": "sentry_sdk/worker.py"}]} | 1,828 | 110 |
gh_patches_debug_13158 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_144 false negative after updating to AWS Provider 4.0
**Describe the issue**
After updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_lifecycle_configuration` block.
**Examples**
Sample code:
```hcl
provider "aws" {
alias = "aws-primary"
region = "us-east-1"
}
provider "aws" {
alias = "aws-dr"
region = "us-west-2"
}
resource "aws_s3_bucket" "test_bucket" {
bucket = var.bucket_name
}
resource "aws_s3_bucket" "test_dr_bucket" {
provider = aws.aws-dr
bucket = "${var.bucket_name}-dr"
}
resource "aws_s3_bucket_versioning" "test_bucket_versioning" {
bucket = aws_s3_bucket.test_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_versioning" "test_dr_bucket_versioning" {
provider = aws.aws-dr
bucket = aws_s3_bucket.test_dr_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_iam_role" "dr_replication" {
name_prefix = "replication"
description = "Allow S3 to assume the role for replication"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "s3ReplicationAssume",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_policy" "dr_replication" {
name_prefix = "replication"
description = "Allows reading for replication."
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateTags",
"s3:ObjectOwnerOverrideToBucketOwner"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.test_dr_bucket.arn}/*"
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "dr_replication" {
name = "replication"
roles = [aws_iam_role.dr_replication.name]
policy_arn = aws_iam_policy.dr_replication.arn
}
resource "aws_s3_bucket_replication_configuration" "dr_bucket_replication" {
# Must have bucket versioning enabled first
depends_on = [
aws_s3_bucket_versioning.test_bucket_versioning,
aws_s3_bucket_versioning.test_dr_bucket_versioning,
]
role = aws_iam_role.dr_replication.arn
bucket = aws_s3_bucket.test_bucket.id
rule {
id = "entire_bucket"
status = "Enabled"
destination {
bucket = aws_s3_bucket.test_dr_bucket.arn
storage_class = "DEEP_ARCHIVE"
}
}
}
```
Expected: `CKV_AWS_144` will pass.
Actual:
```
Check: CKV_AWS_144: "Ensure that S3 bucket has cross-region replication enabled"
FAILED for resource: aws_s3_bucket.test_bucket
File: /dr_test.tf:11-13
Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled
11 | resource "aws_s3_bucket" "test_bucket" {
12 | bucket = var.bucket_name
13 | }
```
**Desktop (please complete the following information):**
- OS: MacOS 10.14.6
- Checkov Version 2.0.1074
**Additional context**
On the surface, this looks like related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but to the `CKV_AWS_144` rule.
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py]
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from checkov.common.models.enums import CheckCategories
4
5
6 class S3BucketReplicationConfiguration(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that S3 bucket has cross-region replication enabled"
9 id = "CKV_AWS_144"
10 supported_resources = ['aws_s3_bucket']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "replication_configuration/[0]/role"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = S3BucketReplicationConfiguration()
22
[end of checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from checkov.common.models.consts import ANY_VALUE
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class S3BucketReplicationConfiguration(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that S3 bucket has cross-region replication enabled"
- id = "CKV_AWS_144"
- supported_resources = ['aws_s3_bucket']
- categories = [CheckCategories.GENERAL_SECURITY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return "replication_configuration/[0]/role"
-
- def get_expected_value(self):
- return ANY_VALUE
-
-
-check = S3BucketReplicationConfiguration()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class S3BucketReplicationConfiguration(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that S3 bucket has cross-region replication enabled\"\n- id = \"CKV_AWS_144\"\n- supported_resources = ['aws_s3_bucket']\n- categories = [CheckCategories.GENERAL_SECURITY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"replication_configuration/[0]/role\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n-\n-\n-check = S3BucketReplicationConfiguration()\n", "issue": "CKV_AWS_144 false negative after updating to AWS Provider 4.0 \n**Describe the issue**\r\nAfter updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_lifecycle_configuration` block.\r\n\r\n**Examples**\r\nSample code:\r\n\r\n```hcl\r\nprovider \"aws\" {\r\n alias = \"aws-primary\"\r\n region = \"us-east-1\"\r\n}\r\n\r\nprovider \"aws\" {\r\n alias = \"aws-dr\"\r\n region = \"us-west-2\"\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_bucket\" {\r\n bucket = var.bucket_name\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_dr_bucket\" {\r\n provider = aws.aws-dr\r\n bucket = \"${var.bucket_name}-dr\"\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_bucket_versioning\" {\r\n bucket = aws_s3_bucket.test_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_dr_bucket_versioning\" {\r\n provider = aws.aws-dr\r\n bucket = aws_s3_bucket.test_dr_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_iam_role\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allow S3 to assume the role for replication\"\r\n\r\n assume_role_policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"s3ReplicationAssume\",\r\n \"Effect\": \"Allow\",\r\n \"Principal\": {\r\n \"Service\": \"s3.amazonaws.com\"\r\n },\r\n \"Action\": \"sts:AssumeRole\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allows reading for replication.\"\r\n\r\n policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Action\": [\r\n \"s3:GetReplicationConfiguration\",\r\n \"s3:ListBucket\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:GetObjectVersion\",\r\n \"s3:GetObjectVersionForReplication\",\r\n \"s3:GetObjectVersionAcl\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}/*\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:ReplicateObject\",\r\n \"s3:ReplicateTags\",\r\n \"s3:ObjectOwnerOverrideToBucketOwner\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": 
\"${aws_s3_bucket.test_dr_bucket.arn}/*\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy_attachment\" \"dr_replication\" {\r\n name = \"replication\"\r\n roles = [aws_iam_role.dr_replication.name]\r\n policy_arn = aws_iam_policy.dr_replication.arn\r\n}\r\n\r\nresource \"aws_s3_bucket_replication_configuration\" \"dr_bucket_replication\" {\r\n\r\n # Must have bucket versioning enabled first\r\n depends_on = [\r\n aws_s3_bucket_versioning.test_bucket_versioning,\r\n aws_s3_bucket_versioning.test_dr_bucket_versioning,\r\n ]\r\n\r\n role = aws_iam_role.dr_replication.arn\r\n bucket = aws_s3_bucket.test_bucket.id\r\n\r\n rule {\r\n id = \"entire_bucket\"\r\n status = \"Enabled\"\r\n\r\n destination {\r\n bucket = aws_s3_bucket.test_dr_bucket.arn\r\n storage_class = \"DEEP_ARCHIVE\"\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\nExpected: `CKV_AWS_144` will pass.\r\n\r\nActual: \r\n```\r\nCheck: CKV_AWS_144: \"Ensure that S3 bucket has cross-region replication enabled\"\r\n FAILED for resource: aws_s3_bucket.test_bucket\r\n File: /dr_test.tf:11-13\r\n Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled\r\n\r\n 11 | resource \"aws_s3_bucket\" \"test_bucket\" {\r\n 12 | bucket = var.bucket_name\r\n 13 | }\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 10.14.6\r\n - Checkov Version 2.0.1074\r\n\r\n**Additional context**\r\nOn the surface, this looks like related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but to the `CKV_AWS_144` rule.\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass S3BucketReplicationConfiguration(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has cross-region replication enabled\"\n id = \"CKV_AWS_144\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"replication_configuration/[0]/role\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = S3BucketReplicationConfiguration()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py"}]} | 1,833 | 266 |
gh_patches_debug_38571 | rasdani/github-patches | git_diff | archlinux__archinstall-1659 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Save configuration" Improve UX
*Feature Request*
I have some time in the next week, I may try and implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you have to scratch your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`.
It would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.
</issue>
<code>
[start of archinstall/lib/user_interaction/save_conf.py]
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any, Dict, TYPE_CHECKING
5
6 from ..configuration import ConfigurationOutput
7 from ..menu import Menu
8 from ..menu.menu import MenuSelectionType
9 from ..output import log
10
11 if TYPE_CHECKING:
12 _: Any
13
14
15 def save_config(config: Dict):
16
17 def preview(selection: str):
18 if options['user_config'] == selection:
19 json_config = config_output.user_config_to_json()
20 return f'{config_output.user_configuration_file}\n{json_config}'
21 elif options['user_creds'] == selection:
22 if json_config := config_output.user_credentials_to_json():
23 return f'{config_output.user_credentials_file}\n{json_config}'
24 else:
25 return str(_('No configuration'))
26 elif options['disk_layout'] == selection:
27 if json_config := config_output.disk_layout_to_json():
28 return f'{config_output.disk_layout_file}\n{json_config}'
29 else:
30 return str(_('No configuration'))
31 elif options['all'] == selection:
32 output = f'{config_output.user_configuration_file}\n'
33 if json_config := config_output.user_credentials_to_json():
34 output += f'{config_output.user_credentials_file}\n'
35 if json_config := config_output.disk_layout_to_json():
36 output += f'{config_output.disk_layout_file}\n'
37 return output[:-1]
38 return None
39
40 config_output = ConfigurationOutput(config)
41
42 options = {
43 'user_config': str(_('Save user configuration')),
44 'user_creds': str(_('Save user credentials')),
45 'disk_layout': str(_('Save disk layout')),
46 'all': str(_('Save all'))
47 }
48
49 choice = Menu(
50 _('Choose which configuration to save'),
51 list(options.values()),
52 sort=False,
53 skip=True,
54 preview_size=0.75,
55 preview_command=preview
56 ).run()
57
58 if choice.type_ == MenuSelectionType.Skip:
59 return
60
61 while True:
62 path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
63 dest_path = Path(path)
64 if dest_path.exists() and dest_path.is_dir():
65 break
66 log(_('Not a valid directory: {}').format(dest_path), fg='red')
67
68 if options['user_config'] == choice.value:
69 config_output.save_user_config(dest_path)
70 elif options['user_creds'] == choice.value:
71 config_output.save_user_creds(dest_path)
72 elif options['disk_layout'] == choice.value:
73 config_output.save_disk_layout(dest_path)
74 elif options['all'] == choice.value:
75 config_output.save_user_config(dest_path)
76 config_output.save_user_creds(dest_path)
77 config_output.save_disk_layout(dest_path)
78
[end of archinstall/lib/user_interaction/save_conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py
--- a/archinstall/lib/user_interaction/save_conf.py
+++ b/archinstall/lib/user_interaction/save_conf.py
@@ -1,9 +1,12 @@
from __future__ import annotations
+import logging
+
from pathlib import Path
from typing import Any, Dict, TYPE_CHECKING
from ..configuration import ConfigurationOutput
+from ..general import SysCommand
from ..menu import Menu
from ..menu.menu import MenuSelectionType
from ..output import log
@@ -58,20 +61,75 @@
if choice.type_ == MenuSelectionType.Skip:
return
- while True:
- path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
- dest_path = Path(path)
- if dest_path.exists() and dest_path.is_dir():
- break
- log(_('Not a valid directory: {}').format(dest_path), fg='red')
-
- if options['user_config'] == choice.value:
- config_output.save_user_config(dest_path)
- elif options['user_creds'] == choice.value:
- config_output.save_user_creds(dest_path)
- elif options['disk_layout'] == choice.value:
- config_output.save_disk_layout(dest_path)
- elif options['all'] == choice.value:
- config_output.save_user_config(dest_path)
- config_output.save_user_creds(dest_path)
- config_output.save_disk_layout(dest_path)
+ dirs_to_exclude = [
+ '/bin',
+ '/dev',
+ '/lib',
+ '/lib64',
+ '/lost+found',
+ '/opt',
+ '/proc',
+ '/run',
+ '/sbin',
+ '/srv',
+ '/sys',
+ '/usr',
+ '/var',
+ ]
+ log(
+ _('When picking a directory to save configuration files to,'
+ ' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),
+ level=logging.DEBUG
+ )
+
+ log(_('Finding possible directories to save configuration files ...'), level=logging.INFO)
+
+ find_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '
+ file_picker_command = f'find / {find_exclude} -o -type d -print0'
+ possible_save_dirs = list(
+ filter(None, SysCommand(file_picker_command).decode().split('\x00'))
+ )
+
+ selection = Menu(
+ _('Select directory (or directories) for saving configuration files'),
+ possible_save_dirs,
+ multi=True,
+ skip=True,
+ allow_reset=False,
+ ).run()
+
+ match selection.type_:
+ case MenuSelectionType.Skip:
+ return
+ case _:
+ save_dirs = selection.value
+
+ prompt = _('Do you want to save {} configuration file(s) in the following locations?\n\n{}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ )
+ save_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()
+ if save_confirmation == Menu.no():
+ return
+
+ log(
+ _('Saving {} configuration files to {}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ ),
+ level=logging.DEBUG
+ )
+
+ if save_dirs is not None:
+ for save_dir_str in save_dirs:
+ save_dir = Path(save_dir_str)
+ if options['user_config'] == choice.value:
+ config_output.save_user_config(save_dir)
+ elif options['user_creds'] == choice.value:
+ config_output.save_user_creds(save_dir)
+ elif options['disk_layout'] == choice.value:
+ config_output.save_disk_layout(save_dir)
+ elif options['all'] == choice.value:
+ config_output.save_user_config(save_dir)
+ config_output.save_user_creds(save_dir)
+ config_output.save_disk_layout(save_dir)
| {"golden_diff": "diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py\n--- a/archinstall/lib/user_interaction/save_conf.py\n+++ b/archinstall/lib/user_interaction/save_conf.py\n@@ -1,9 +1,12 @@\n from __future__ import annotations\n \n+import logging\n+\n from pathlib import Path\n from typing import Any, Dict, TYPE_CHECKING\n \n from ..configuration import ConfigurationOutput\n+from ..general import SysCommand\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n from ..output import log\n@@ -58,20 +61,75 @@\n \tif choice.type_ == MenuSelectionType.Skip:\n \t\treturn\n \n-\twhile True:\n-\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n-\t\tdest_path = Path(path)\n-\t\tif dest_path.exists() and dest_path.is_dir():\n-\t\t\tbreak\n-\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n-\n-\tif options['user_config'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\telif options['user_creds'] == choice.value:\n-\t\tconfig_output.save_user_creds(dest_path)\n-\telif options['disk_layout'] == choice.value:\n-\t\tconfig_output.save_disk_layout(dest_path)\n-\telif options['all'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\t\tconfig_output.save_user_creds(dest_path)\n-\t\tconfig_output.save_disk_layout(dest_path)\n+\tdirs_to_exclude = [\n+\t\t'/bin',\n+\t\t'/dev',\n+\t\t'/lib',\n+\t\t'/lib64',\n+\t\t'/lost+found',\n+\t\t'/opt',\n+\t\t'/proc',\n+\t\t'/run',\n+\t\t'/sbin',\n+\t\t'/srv',\n+\t\t'/sys',\n+\t\t'/usr',\n+\t\t'/var',\n+\t]\n+\tlog(\n+\t\t_('When picking a directory to save configuration files to,'\n+\t\t' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\n+\tlog(_('Finding possible directories to save configuration files ...'), level=logging.INFO)\n+\t\n+\tfind_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '\n+\tfile_picker_command = f'find / {find_exclude} -o -type d -print0'\n+\tpossible_save_dirs = list(\n+\t\tfilter(None, SysCommand(file_picker_command).decode().split('\\x00'))\n+\t)\n+\n+\tselection = Menu(\n+\t\t_('Select directory (or directories) for saving configuration files'),\n+\t\tpossible_save_dirs,\n+\t\tmulti=True,\n+\t\tskip=True,\n+\t\tallow_reset=False,\n+\t).run()\n+\n+\tmatch selection.type_:\n+\t\tcase MenuSelectionType.Skip:\n+\t\t\treturn\n+\t\tcase _:\n+\t\t\tsave_dirs = selection.value\n+\n+\tprompt = _('Do you want to save {} configuration file(s) in the following locations?\\n\\n{}').format(\n+\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\tsave_dirs\n+\t)\n+\tsave_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n+\tif save_confirmation == Menu.no():\n+\t\treturn\n+\t\n+\tlog(\n+\t\t_('Saving {} configuration files to {}').format(\n+\t\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\t\tsave_dirs\n+\t\t),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\t\n+\tif save_dirs is not None:\n+\t\tfor save_dir_str in save_dirs:\n+\t\t\tsave_dir = Path(save_dir_str)\n+\t\t\tif options['user_config'] == choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\telif options['user_creds'] == choice.value:\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\telif options['disk_layout'] == choice.value:\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n+\t\t\telif options['all'] == 
choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n", "issue": "\"Save configuration\" Improve UX\n*Feature Request*\r\n\r\nI have some time in the next week, I may try and implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you have to scratch your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`. \r\n\r\nIt would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom ..configuration import ConfigurationOutput\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef save_config(config: Dict):\n\n\tdef preview(selection: str):\n\t\tif options['user_config'] == selection:\n\t\t\tjson_config = config_output.user_config_to_json()\n\t\t\treturn f'{config_output.user_configuration_file}\\n{json_config}'\n\t\telif options['user_creds'] == selection:\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\treturn f'{config_output.user_credentials_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['disk_layout'] == selection:\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\treturn f'{config_output.disk_layout_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['all'] == selection:\n\t\t\toutput = f'{config_output.user_configuration_file}\\n'\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\toutput += f'{config_output.user_credentials_file}\\n'\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\toutput += f'{config_output.disk_layout_file}\\n'\n\t\t\treturn output[:-1]\n\t\treturn None\n\n\tconfig_output = ConfigurationOutput(config)\n\n\toptions = {\n\t\t'user_config': str(_('Save user configuration')),\n\t\t'user_creds': str(_('Save user credentials')),\n\t\t'disk_layout': str(_('Save disk layout')),\n\t\t'all': str(_('Save all'))\n\t}\n\n\tchoice = Menu(\n\t\t_('Choose which configuration to save'),\n\t\tlist(options.values()),\n\t\tsort=False,\n\t\tskip=True,\n\t\tpreview_size=0.75,\n\t\tpreview_command=preview\n\t).run()\n\n\tif choice.type_ == MenuSelectionType.Skip:\n\t\treturn\n\n\twhile True:\n\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n\t\tdest_path = Path(path)\n\t\tif dest_path.exists() and dest_path.is_dir():\n\t\t\tbreak\n\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n\n\tif options['user_config'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\telif options['user_creds'] == choice.value:\n\t\tconfig_output.save_user_creds(dest_path)\n\telif options['disk_layout'] == choice.value:\n\t\tconfig_output.save_disk_layout(dest_path)\n\telif options['all'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\t\tconfig_output.save_user_creds(dest_path)\n\t\tconfig_output.save_disk_layout(dest_path)\n", "path": "archinstall/lib/user_interaction/save_conf.py"}]} | 1,396 | 924 |
gh_patches_debug_14642 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6819 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mitmdump does not exit
#### Problem Description
Mitmdump does not exit automatically when executing:
`mitmdump -nr infile -w outfile
`
Until version 10.0.0 it was working properly and when running mitmdump with "-n" the process finished automatically once the outfile was written.
#### Steps to reproduce the behavior:
1. Generate a mitm file
2. Execute mitmdump -nr infile -w outfile
#### System Information
Mitmproxy: 10.3.0 binary
Python: 3.12.3
OpenSSL: OpenSSL 3.2.1 30 Jan 2024
Platform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35
</issue>
<code>
[start of mitmproxy/addons/readfile.py]
1 import asyncio
2 import logging
3 import os.path
4 import sys
5 from typing import BinaryIO
6 from typing import Optional
7
8 from mitmproxy import command
9 from mitmproxy import ctx
10 from mitmproxy import exceptions
11 from mitmproxy import flowfilter
12 from mitmproxy import io
13
14 logger = logging.getLogger(__name__)
15
16
17 class ReadFile:
18 """
19 An addon that handles reading from file on startup.
20 """
21
22 def __init__(self):
23 self.filter = None
24 self._read_task: asyncio.Task | None = None
25
26 def load(self, loader):
27 loader.add_option("rfile", Optional[str], None, "Read flows from file.")
28 loader.add_option(
29 "readfile_filter", Optional[str], None, "Read only matching flows."
30 )
31
32 def configure(self, updated):
33 if "readfile_filter" in updated:
34 if ctx.options.readfile_filter:
35 try:
36 self.filter = flowfilter.parse(ctx.options.readfile_filter)
37 except ValueError as e:
38 raise exceptions.OptionsError(str(e)) from e
39 else:
40 self.filter = None
41
42 async def load_flows(self, fo: BinaryIO) -> int:
43 cnt = 0
44 freader = io.FlowReader(fo)
45 try:
46 for flow in freader.stream():
47 if self.filter and not self.filter(flow):
48 continue
49 await ctx.master.load_flow(flow)
50 cnt += 1
51 except (OSError, exceptions.FlowReadException) as e:
52 if cnt:
53 logging.warning("Flow file corrupted - loaded %i flows." % cnt)
54 else:
55 logging.error("Flow file corrupted.")
56 raise exceptions.FlowReadException(str(e)) from e
57 else:
58 return cnt
59
60 async def load_flows_from_path(self, path: str) -> int:
61 path = os.path.expanduser(path)
62 try:
63 with open(path, "rb") as f:
64 return await self.load_flows(f)
65 except OSError as e:
66 logging.error(f"Cannot load flows: {e}")
67 raise exceptions.FlowReadException(str(e)) from e
68
69 async def doread(self, rfile: str) -> None:
70 try:
71 await self.load_flows_from_path(rfile)
72 except exceptions.FlowReadException as e:
73 logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
74 finally:
75 self._read_task = None
76
77 def running(self):
78 if ctx.options.rfile:
79 self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))
80
81 @command.command("readfile.reading")
82 def reading(self) -> bool:
83 return bool(self._read_task)
84
85
86 class ReadFileStdin(ReadFile):
87 """Support the special case of "-" for reading from stdin"""
88
89 async def load_flows_from_path(self, path: str) -> int:
90 if path == "-": # pragma: no cover
91 # Need to think about how to test this. This function is scheduled
92 # onto the event loop, where a sys.stdin mock has no effect.
93 return await self.load_flows(sys.stdin.buffer)
94 else:
95 return await super().load_flows_from_path(path)
96
[end of mitmproxy/addons/readfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py
--- a/mitmproxy/addons/readfile.py
+++ b/mitmproxy/addons/readfile.py
@@ -71,8 +71,6 @@
await self.load_flows_from_path(rfile)
except exceptions.FlowReadException as e:
logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
- finally:
- self._read_task = None
def running(self):
if ctx.options.rfile:
@@ -80,7 +78,7 @@
@command.command("readfile.reading")
def reading(self) -> bool:
- return bool(self._read_task)
+ return bool(self._read_task and not self._read_task.done())
class ReadFileStdin(ReadFile):
| {"golden_diff": "diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py\n--- a/mitmproxy/addons/readfile.py\n+++ b/mitmproxy/addons/readfile.py\n@@ -71,8 +71,6 @@\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n- finally:\n- self._read_task = None\n \n def running(self):\n if ctx.options.rfile:\n@@ -80,7 +78,7 @@\n \n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n- return bool(self._read_task)\n+ return bool(self._read_task and not self._read_task.done())\n \n \n class ReadFileStdin(ReadFile):\n", "issue": "Mitmdump does not exit\n#### Problem Description\r\nMitmdump does not exit automatically when executing:\r\n\r\n`mitmdump -nr infile -w outfile\r\n`\r\n\r\nUntil version 10.0.0 it was working properly and when running mitmdump with \"-n\" the process finished automatically once the outfile was written.\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Generate a mitm file\r\n2. Execute mitmdump -nr infile -w outfile\r\n\r\n#### System Information\r\nMitmproxy: 10.3.0 binary\r\nPython: 3.12.3\r\nOpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\nPlatform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport os.path\nimport sys\nfrom typing import BinaryIO\nfrom typing import Optional\n\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReadFile:\n \"\"\"\n An addon that handles reading from file on startup.\n \"\"\"\n\n def __init__(self):\n self.filter = None\n self._read_task: asyncio.Task | None = None\n\n def load(self, loader):\n loader.add_option(\"rfile\", Optional[str], None, \"Read flows from file.\")\n loader.add_option(\n \"readfile_filter\", Optional[str], None, \"Read only matching flows.\"\n )\n\n def configure(self, updated):\n if \"readfile_filter\" in updated:\n if ctx.options.readfile_filter:\n try:\n self.filter = flowfilter.parse(ctx.options.readfile_filter)\n except ValueError as e:\n raise exceptions.OptionsError(str(e)) from e\n else:\n self.filter = None\n\n async def load_flows(self, fo: BinaryIO) -> int:\n cnt = 0\n freader = io.FlowReader(fo)\n try:\n for flow in freader.stream():\n if self.filter and not self.filter(flow):\n continue\n await ctx.master.load_flow(flow)\n cnt += 1\n except (OSError, exceptions.FlowReadException) as e:\n if cnt:\n logging.warning(\"Flow file corrupted - loaded %i flows.\" % cnt)\n else:\n logging.error(\"Flow file corrupted.\")\n raise exceptions.FlowReadException(str(e)) from e\n else:\n return cnt\n\n async def load_flows_from_path(self, path: str) -> int:\n path = os.path.expanduser(path)\n try:\n with open(path, \"rb\") as f:\n return await self.load_flows(f)\n except OSError as e:\n logging.error(f\"Cannot load flows: {e}\")\n raise exceptions.FlowReadException(str(e)) from e\n\n async def doread(self, rfile: str) -> None:\n try:\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n finally:\n self._read_task = None\n\n def running(self):\n if ctx.options.rfile:\n self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))\n\n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n return bool(self._read_task)\n\n\nclass ReadFileStdin(ReadFile):\n 
\"\"\"Support the special case of \"-\" for reading from stdin\"\"\"\n\n async def load_flows_from_path(self, path: str) -> int:\n if path == \"-\": # pragma: no cover\n # Need to think about how to test this. This function is scheduled\n # onto the event loop, where a sys.stdin mock has no effect.\n return await self.load_flows(sys.stdin.buffer)\n else:\n return await super().load_flows_from_path(path)\n", "path": "mitmproxy/addons/readfile.py"}]} | 1,588 | 185 |
gh_patches_debug_34322 | rasdani/github-patches | git_diff | networkx__networkx-2525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create bridges.py
Contains three simple functions for detection and retrieval of bridges or local bridges in an undirected network.
</issue>
<code>
[start of networkx/algorithms/bridges.py]
1 # -*- coding: utf-8 -*-
2 # bridges.py - bridge-finding algorithms
3 #
4 # Copyright 2004-2016 NetworkX developers.
5 #
6 # This file is part of NetworkX.
7 #
8 # NetworkX is distributed under a BSD license; see LICENSE.txt for more
9 # information.
10 """Bridge-finding algorithms."""
11 from itertools import chain
12
13 import networkx as nx
14 from networkx.utils import not_implemented_for
15
16 __all__ = ['bridges', 'has_bridges']
17
18
19 @not_implemented_for('multigraph')
20 @not_implemented_for('directed')
21 def bridges(G, root=None):
22 """Generate all bridges in a graph.
23
24 A *bridge* in a graph is an edge whose removal causes the number of
25 connected components of the graph to increase.
26
27 Parameters
28 ----------
29 G : undirected graph
30
31 root : node (optional)
32 A node in the graph `G`. If specified, only the bridges in the
33 connected component containing this node will be returned.
34
35 Yields
36 ------
37 e : edge
38 An edge in the graph whose removal disconnects the graph (or
39 causes the number of connected components to increase).
40
41 Raises
42 ------
43 NodeNotFound
44 If `root` is not in the graph `G`.
45
46 Examples
47 --------
48 The barbell graph with parameter zero has a single bridge::
49
50 >>> G = nx.barbell_graph(10, 0)
51 >>> list(nx.bridges(G))
52 [(9, 10)]
53
54 Notes
55 -----
56 This implementation uses the :func:`networkx.chain_decomposition`
57 function, so it shares its worst-case time complexity, :math:`O(m +
58 n)`, ignoring polylogarithmic factors, where *n* is the number of
59 nodes in the graph and *m* is the number of edges.
60
61 """
62 chains = nx.chain_decomposition(G, root=root)
63 chain_edges = set(chain.from_iterable(chains))
64 for u, v in G.edges():
65 if (u, v) not in chain_edges and (v, u) not in chain_edges:
66 yield u, v
67
68
69 @not_implemented_for('multigraph')
70 @not_implemented_for('directed')
71 def has_bridges(G, root=None):
72 """Decide whether a graph has any bridges.
73
74 A *bridge* in a graph is an edge whose removal causes the number of
75 connected components of the graph to increase.
76
77 Parameters
78 ----------
79 G : undirected graph
80
81 root : node (optional)
82 A node in the graph `G`. If specified, only the bridges in the
83 connected component containing this node will be considered.
84
85 Returns
86 -------
87 bool
88 Whether the graph (or the connected component containing `root`)
89 has any bridges.
90
91 Raises
92 ------
93 NodeNotFound
94 If `root` is not in the graph `G`.
95
96 Examples
97 --------
98 The barbell graph with parameter zero has a single bridge::
99
100 >>> G = nx.barbell_graph(10, 0)
101 >>> nx.has_bridges(G)
102 True
103
104 On the other hand, the cycle graph has no bridges::
105
106 >>> G = nx.cycle_graph(5)
107 >>> nx.has_bridges(G)
108 False
109
110 Notes
111 -----
112 This implementation uses the :func:`networkx.bridges` function, so
113 it shares its worst-case time complexity, :math:`O(m + n)`, ignoring
114 polylogarithmic factors, where *n* is the number of nodes in the
115 graph and *m* is the number of edges.
116
117 """
118 try:
119 next(bridges(G))
120 except StopIteration:
121 return False
122 else:
123 return True
124
[end of networkx/algorithms/bridges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# bridges.py - bridge-finding algorithms
#
-# Copyright 2004-2016 NetworkX developers.
+# Copyright 2004-2017 NetworkX developers.
#
# This file is part of NetworkX.
#
@@ -13,7 +13,7 @@
import networkx as nx
from networkx.utils import not_implemented_for
-__all__ = ['bridges', 'has_bridges']
+__all__ = ['bridges', 'has_bridges', 'local_bridges']
@not_implemented_for('multigraph')
@@ -121,3 +121,61 @@
return False
else:
return True
+
+
+@not_implemented_for('multigraph')
+@not_implemented_for('directed')
+def local_bridges(G, with_span=True, weight=None):
+ """Iterate over local bridges of `G` optionally computing the span
+
+ A *local bridge* is an edge whose endpoints have no common neighbors.
+ That is, the edge is not part of a triangle in the graph.
+
+ The *span* of a *local bridge* is the shortest path length between
+ the endpoints if the local bridge is removed.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ with_span : bool
+ If True, yield a 3-tuple `(u, v, span)`
+
+ weight : function, string or None (default: None)
+ If function, used to compute edge weights for the span.
+ If string, the edge data attribute used in calculating span.
+ If None, all edges have weight 1.
+
+ Yields
+ ------
+ e : edge
+ The local bridges as an edge 2-tuple of nodes `(u, v)` or
+ as a 3-tuple `(u, v, span)` when `with_span is True`.
+
+ Examples
+ --------
+ A cycle graph has every edge a local bridge with span N-1.
+
+ >>> G = nx.cycle_graph(9)
+ >>> (0, 8, 8) in set(nx.local_bridges(G))
+ True
+ """
+ if with_span is not True:
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ yield u, v
+ else:
+ wt = nx.weighted._weight_function(G, weight)
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ enodes = {u, v}
+ def hide_edge(n, nbr, d):
+ if n not in enodes or nbr not in enodes:
+ return wt(n, nbr, d)
+ return None
+
+ try:
+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)
+ yield u, v, span
+ except nx.NetworkXNoPath:
+ yield u, v, float('inf')
| {"golden_diff": "diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py\n--- a/networkx/algorithms/bridges.py\n+++ b/networkx/algorithms/bridges.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n # bridges.py - bridge-finding algorithms\n #\n-# Copyright 2004-2016 NetworkX developers.\n+# Copyright 2004-2017 NetworkX developers.\n #\n # This file is part of NetworkX.\n #\n@@ -13,7 +13,7 @@\n import networkx as nx\n from networkx.utils import not_implemented_for\n \n-__all__ = ['bridges', 'has_bridges']\n+__all__ = ['bridges', 'has_bridges', 'local_bridges']\n \n \n @not_implemented_for('multigraph')\n@@ -121,3 +121,61 @@\n return False\n else:\n return True\n+\n+\n+@not_implemented_for('multigraph')\n+@not_implemented_for('directed')\n+def local_bridges(G, with_span=True, weight=None):\n+ \"\"\"Iterate over local bridges of `G` optionally computing the span\n+\n+ A *local bridge* is an edge whose endpoints have no common neighbors.\n+ That is, the edge is not part of a triangle in the graph.\n+\n+ The *span* of a *local bridge* is the shortest path length between\n+ the endpoints if the local bridge is removed.\n+\n+ Parameters\n+ ----------\n+ G : undirected graph\n+\n+ with_span : bool\n+ If True, yield a 3-tuple `(u, v, span)`\n+\n+ weight : function, string or None (default: None)\n+ If function, used to compute edge weights for the span.\n+ If string, the edge data attribute used in calculating span.\n+ If None, all edges have weight 1.\n+\n+ Yields\n+ ------\n+ e : edge\n+ The local bridges as an edge 2-tuple of nodes `(u, v)` or\n+ as a 3-tuple `(u, v, span)` when `with_span is True`.\n+\n+ Examples\n+ --------\n+ A cycle graph has every edge a local bridge with span N-1.\n+\n+ >>> G = nx.cycle_graph(9)\n+ >>> (0, 8, 8) in set(nx.local_bridges(G))\n+ True\n+ \"\"\"\n+ if with_span is not True:\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ yield u, v\n+ else:\n+ wt = nx.weighted._weight_function(G, weight)\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ enodes = {u, v}\n+ def hide_edge(n, nbr, d):\n+ if n not in enodes or nbr not in enodes:\n+ return wt(n, nbr, d)\n+ return None\n+\n+ try:\n+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n+ yield u, v, span\n+ except nx.NetworkXNoPath:\n+ yield u, v, float('inf')\n", "issue": "Create bridges.py\nContains three simple functions for detection and retrieval of bridges or local bridges in a undirected network.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# bridges.py - bridge-finding algorithms\n#\n# Copyright 2004-2016 NetworkX developers.\n#\n# This file is part of NetworkX.\n#\n# NetworkX is distributed under a BSD license; see LICENSE.txt for more\n# information.\n\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['bridges', 'has_bridges']\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This implementation uses the :func:`networkx.chain_decomposition`\n function, so it shares its worst-case time complexity, :math:`O(m +\n n)`, ignoring polylogarithmic factors, where *n* is the number of\n nodes in the graph and *m* is the number of edges.\n\n \"\"\"\n chains = nx.chain_decomposition(G, root=root)\n chain_edges = set(chain.from_iterable(chains))\n for u, v in G.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n yield u, v\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, :math:`O(m + n)`, ignoring\n polylogarithmic factors, where *n* is the number of nodes in the\n graph and *m* is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G))\n except StopIteration:\n return False\n else:\n return True\n", "path": "networkx/algorithms/bridges.py"}]} | 1,657 | 745 |
gh_patches_debug_37021 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add nullable attribute to the Team Model
The following changes are required in the Team model:
- [x] Convert the URLFields to CharField with URL
- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields
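
As a rough sketch of the requested field change (illustrative only, with a placeholder model and field name, not the project's actual code), a nullable URL-style field in Django can be declared as a `CharField` that allows both database NULLs and blank form values:

```python
from django.db import models

class ExampleTeam(models.Model):
    # CharField holding a URL; null=True allows NULL in the database,
    # blank=True makes the field optional in forms/admin.
    github_url = models.CharField(max_length=200, null=True, blank=True)
```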
</issue>
<code>
[start of apps/web/models.py]
1 from __future__ import unicode_literals
2
3 from django.db import models
4
5 from base.models import (TimeStampedModel, )
6
7
8 class Contact(TimeStampedModel):
9 """Model representing details of User submitting queries."""
10 name = models.CharField(max_length=100,)
11 email = models.EmailField(max_length=70,)
12 message = models.CharField(max_length=500,)
13
14 def __unicode__(self):
15 return "%s: %s: %s" % (self.name, self.email, self.message)
16
17 class Meta:
18 app_label = 'web'
19 db_table = 'contact'
20
21
22 class Team(models.Model):
23 """Model representing details of Team"""
24
25 # Team Type Options
26 CORE_TEAM = 'Core Team'
27 CONTRIBUTOR = 'Contributor'
28
29 TEAM_TYPE_OPTIONS = (
30 (CORE_TEAM, CORE_TEAM),
31 (CONTRIBUTOR, CONTRIBUTOR),
32 )
33
34 name = models.CharField(max_length=100)
35 email = models.EmailField(max_length=70, null=True)
36 description = models.TextField(null=True)
37 headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
38 visible = models.BooleanField(default=True)
39 github_url = models.CharField(max_length=200, null=True)
40 linkedin_url = models.CharField(max_length=200, null=True)
41 personal_website = models.CharField(max_length=200, null=True)
42 background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
43 team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
44
45 def __unicode__(self):
46 return self.name
47
48 class Meta:
49 app_label = 'web'
50 db_table = 'teams'
51
[end of apps/web/models.py]
[start of apps/web/migrations/0004_change_team_model_field_type.py]
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.10.2 on 2017-03-19 21:06
3 from __future__ import unicode_literals
4
5 from django.db import migrations, models
6
7
8 class Migration(migrations.Migration):
9
10 dependencies = [
11 ('web', '0003_added_description_and_background_image_to_team_model'),
12 ]
13
14 operations = [
15 migrations.AlterField(
16 model_name='team',
17 name='email',
18 field=models.EmailField(max_length=70, null=True),
19 ),
20 migrations.AlterField(
21 model_name='team',
22 name='github_url',
23 field=models.CharField(max_length=200, null=True),
24 ),
25 migrations.AlterField(
26 model_name='team',
27 name='linkedin_url',
28 field=models.CharField(max_length=200, null=True),
29 ),
30 migrations.AlterField(
31 model_name='team',
32 name='personal_website',
33 field=models.CharField(max_length=200, null=True),
34 ),
35 ]
36
[end of apps/web/migrations/0004_change_team_model_field_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py
--- a/apps/web/migrations/0004_change_team_model_field_type.py
+++ b/apps/web/migrations/0004_change_team_model_field_type.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Generated by Django 1.10.2 on 2017-03-19 21:06
+# Generated by Django 1.10.2 on 2017-03-19 21:58
from __future__ import unicode_literals
from django.db import migrations, models
@@ -15,21 +15,21 @@
migrations.AlterField(
model_name='team',
name='email',
- field=models.EmailField(max_length=70, null=True),
+ field=models.EmailField(blank=True, max_length=70, null=True),
),
migrations.AlterField(
model_name='team',
name='github_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='linkedin_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='personal_website',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
]
diff --git a/apps/web/models.py b/apps/web/models.py
--- a/apps/web/models.py
+++ b/apps/web/models.py
@@ -32,13 +32,13 @@
)
name = models.CharField(max_length=100)
- email = models.EmailField(max_length=70, null=True)
+ email = models.EmailField(max_length=70, null=True, blank=True)
description = models.TextField(null=True)
headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
visible = models.BooleanField(default=True)
- github_url = models.CharField(max_length=200, null=True)
- linkedin_url = models.CharField(max_length=200, null=True)
- personal_website = models.CharField(max_length=200, null=True)
+ github_url = models.CharField(max_length=200, null=True, blank=True)
+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)
+ personal_website = models.CharField(max_length=200, null=True, blank=True)
background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
| {"golden_diff": "diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py\n--- a/apps/web/migrations/0004_change_team_model_field_type.py\n+++ b/apps/web/migrations/0004_change_team_model_field_type.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Generated by Django 1.10.2 on 2017-03-19 21:06\n+# Generated by Django 1.10.2 on 2017-03-19 21:58\n from __future__ import unicode_literals\n \n from django.db import migrations, models\n@@ -15,21 +15,21 @@\n migrations.AlterField(\n model_name='team',\n name='email',\n- field=models.EmailField(max_length=70, null=True),\n+ field=models.EmailField(blank=True, max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n ]\ndiff --git a/apps/web/models.py b/apps/web/models.py\n--- a/apps/web/models.py\n+++ b/apps/web/models.py\n@@ -32,13 +32,13 @@\n )\n \n name = models.CharField(max_length=100)\n- email = models.EmailField(max_length=70, null=True)\n+ email = models.EmailField(max_length=70, null=True, blank=True)\n description = models.TextField(null=True)\n headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n- github_url = models.CharField(max_length=200, null=True)\n- linkedin_url = models.CharField(max_length=200, null=True)\n- personal_website = models.CharField(max_length=200, null=True)\n+ github_url = models.CharField(max_length=200, null=True, blank=True)\n+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)\n+ personal_website = models.CharField(max_length=200, null=True, blank=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n", "issue": "Add nullable attribute to the Team Model\nFollowing changes are required in the Team model: \r\n\r\n- [x] Convert the URLFields to CharField with URL\r\n\r\n- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom base.models import (TimeStampedModel, )\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Model representing details of User submitting queries.\"\"\"\n name = models.CharField(max_length=100,)\n email = models.EmailField(max_length=70,)\n message = models.CharField(max_length=500,)\n\n def __unicode__(self):\n return \"%s: %s: %s\" % (self.name, self.email, self.message)\n\n class Meta:\n app_label = 'web'\n db_table = 'contact'\n\n\nclass Team(models.Model):\n \"\"\"Model representing details of Team\"\"\"\n\n # Team Type Options\n CORE_TEAM = 'Core Team'\n CONTRIBUTOR = 'Contributor'\n\n TEAM_TYPE_OPTIONS = (\n (CORE_TEAM, CORE_TEAM),\n (CONTRIBUTOR, CONTRIBUTOR),\n )\n\n name = models.CharField(max_length=100)\n email = models.EmailField(max_length=70, null=True)\n description = models.TextField(null=True)\n 
headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n github_url = models.CharField(max_length=200, null=True)\n linkedin_url = models.CharField(max_length=200, null=True)\n personal_website = models.CharField(max_length=200, null=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'web'\n db_table = 'teams'\n", "path": "apps/web/models.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-03-19 21:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0003_added_description_and_background_image_to_team_model'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='team',\n name='email',\n field=models.EmailField(max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n field=models.CharField(max_length=200, null=True),\n ),\n ]\n", "path": "apps/web/migrations/0004_change_team_model_field_type.py"}]} | 1,385 | 666 |
gh_patches_debug_15190 | rasdani/github-patches | git_diff | mirumee__ariadne-490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release Ariadne 0.13
# TODO
- [x] Update dependencies
- [x] Fix linters errors on repo
- [x] #483
- [x] Update changelog
- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)
- [x] Reach out to our amazing art team for tweet graphics
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Programming Language :: Python :: 3.8",
14 "Topic :: Software Development :: Libraries :: Python Modules",
15 ]
16
17 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
18 with open(README_PATH, "r", encoding="utf8") as f:
19 README = f.read()
20
21 setup(
22 name="ariadne",
23 author="Mirumee Software",
24 author_email="[email protected]",
25 description="Ariadne is a Python library for implementing GraphQL servers.",
26 long_description=README,
27 long_description_content_type="text/markdown",
28 license="BSD",
29 version="0.12.0",
30 url="https://github.com/mirumee/ariadne",
31 packages=["ariadne"],
32 include_package_data=True,
33 install_requires=[
34 "graphql-core>=3.1.0",
35 "starlette<0.15",
36 "typing_extensions>=3.6.0",
37 ],
38 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
39 classifiers=CLASSIFIERS,
40 platforms=["any"],
41 zip_safe=False,
42 )
43
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
]
@@ -26,7 +27,7 @@
long_description=README,
long_description_content_type="text/markdown",
license="BSD",
- version="0.12.0",
+ version="0.13.0",
url="https://github.com/mirumee/ariadne",
packages=["ariadne"],
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ]\n \n@@ -26,7 +27,7 @@\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n- version=\"0.12.0\",\n+ version=\"0.13.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n", "issue": "Release Ariadne 0.13\n# TODO\r\n\r\n- [x] Update dependencies\r\n- [x] Fix linters errors on repo\r\n- [x] #483 \r\n- [x] Update changelog\r\n- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)\r\n- [x] Reach to our amazing art team for tweet graphics\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.12.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.1.0\",\n \"starlette<0.15\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,038 | 180 |
gh_patches_debug_25353 | rasdani/github-patches | git_diff | OpenMined__PySyft-3759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement Negation operation for FV HE Scheme
## Feature Description
The negation operation for the FV scheme needs to be implemented.
It should negate a ciphertext object and return the result in ciphertext form.
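
For reference, in RLWE-based schemes such as FV, negating a ciphertext means negating every polynomial coefficient of every ciphertext component modulo the matching coefficient modulus. The helper below is only a sketch with hypothetical names (`negate_mod`, nested lists standing in for the ciphertext data); it is not the library's actual API.

```python
def negate_mod(a, q):
    # (-a) mod q, kept in the range [0, q)
    return (-a) % q

def negate_ciphertext(ct_data, coeff_moduli):
    # ct_data: components -> polynomials (one per modulus) -> coefficients
    return [
        [[negate_mod(c, q) for c in poly] for poly, q in zip(component, coeff_moduli)]
        for component in ct_data
    ]

print(negate_ciphertext([[[1, 2, 3]]], [7]))  # [[[6, 5, 4]]]
```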
</issue>
<code>
[start of syft/frameworks/torch/he/fv/evaluator.py]
1 import copy
2
3 from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
4 from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
5 from syft.frameworks.torch.he.fv.ciphertext import CipherText
6 from syft.frameworks.torch.he.fv.plaintext import PlainText
7
8
9 class Evaluator:
10 def __init__(self, context):
11 self.context = context
12 self.coeff_modulus = context.param.coeff_modulus
13 self.plain_modulus = context.param.plain_modulus
14
15 def add(self, op1, op2):
16 """Adds two operands using FV scheme.
17
18 Args:
19 op1 (Ciphertext/Plaintext): First argument.
20 op2 (Ciphertext/Plaintext): Second argument.
21
22 Returns:
23 If both arguments are Plaintext elements then the result will be a Plaintext object
24 otherwise a Ciphertext object with value equivalent to the result of addition
25 operation of two provided arguments.
26 """
27 if isinstance(op1, CipherText) and isinstance(op2, CipherText):
28 return self._add_cipher_cipher(op1, op2)
29
30 elif isinstance(op1, PlainText) and isinstance(op2, PlainText):
31 return self._add_plain_plain(op1, op2)
32
33 elif isinstance(op1, PlainText) and isinstance(op2, CipherText):
34 return self._add_plain_cipher(op1, op2)
35
36 elif isinstance(op1, CipherText) and isinstance(op2, PlainText):
37 return self._add_plain_cipher(op2, op1)
38
39 else:
40 raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
41
42 def _add_cipher_cipher(self, ct1, ct2):
43 """Adds two ciphertexts.
44
45 Args:
46 ct1 (Ciphertext): First argument.
47 ct2 (Ciphertext): Second argument.
48
49 Returns:
50 A Ciphertext object with value equivalent to result of addition of two provided
51 arguments.
52 """
53 ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)
54 result = ct2 if len(ct2) > len(ct1) else ct1
55
56 for i in range(min(len(ct1), len(ct2))):
57 for j in range(len(self.coeff_modulus)):
58 result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])
59
60 return CipherText(result)
61
62 def _add_plain_cipher(self, pt, ct):
63 """Adds a ciphertext and a plaintext.
64
65 Args:
66 pt (Plaintext): First argument.
67 ct (Ciphertext): Second argument.
68 Returns:
69 A Ciphertext object with value equivalent to result of addition of two provided
70 arguments.
71 """
72 ct = copy.deepcopy(ct)
73 return multiply_add_plain_with_delta(ct, pt, self.context)
74
75 def _add_plain_plain(self, pt1, pt2):
76 """Adds two plaintexts object.
77
78 Args:
79 pt1 (Plaintext): First argument.
80 pt2 (Plaintext): Second argument.
81
82 Returns:
83 A Plaintext object with value equivalent to result of addition of two provided
84 arguments.
85 """
86 pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)
87 return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))
88
[end of syft/frameworks/torch/he/fv/evaluator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py
--- a/syft/frameworks/torch/he/fv/evaluator.py
+++ b/syft/frameworks/torch/he/fv/evaluator.py
@@ -1,6 +1,7 @@
import copy
from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
+from syft.frameworks.torch.he.fv.util.operations import negate_mod
from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
from syft.frameworks.torch.he.fv.ciphertext import CipherText
from syft.frameworks.torch.he.fv.plaintext import PlainText
@@ -39,6 +40,24 @@
else:
raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
+ def negate(self, ct):
+ """Negate a cipher i.e -(ct_value)
+
+ Args:
+ ct (Ciphertext): Ciphertext to be negated.
+
+ Returns:
+ A Ciphertext object with value equivalent to result of -(ct_value).
+ """
+ result = copy.deepcopy(ct.data)
+
+ for i in range(len(result)):
+ for j in range(len(result[i])):
+ for k in range(len(result[i][j])):
+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])
+
+ return CipherText(result)
+
def _add_cipher_cipher(self, ct1, ct2):
"""Adds two ciphertexts.
| {"golden_diff": "diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py\n--- a/syft/frameworks/torch/he/fv/evaluator.py\n+++ b/syft/frameworks/torch/he/fv/evaluator.py\n@@ -1,6 +1,7 @@\n import copy\n \n from syft.frameworks.torch.he.fv.util.operations import poly_add_mod\n+from syft.frameworks.torch.he.fv.util.operations import negate_mod\n from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\n from syft.frameworks.torch.he.fv.ciphertext import CipherText\n from syft.frameworks.torch.he.fv.plaintext import PlainText\n@@ -39,6 +40,24 @@\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n \n+ def negate(self, ct):\n+ \"\"\"Negate a cipher i.e -(ct_value)\n+\n+ Args:\n+ ct (Ciphertext): Ciphertext to be negated.\n+\n+ Returns:\n+ A Ciphertext object with value equivalent to result of -(ct_value).\n+ \"\"\"\n+ result = copy.deepcopy(ct.data)\n+\n+ for i in range(len(result)):\n+ for j in range(len(result[i])):\n+ for k in range(len(result[i][j])):\n+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])\n+\n+ return CipherText(result)\n+\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n", "issue": "Implement Negation operation for FV HE Scheme\n## Feature Description\r\nNegation operations of FV Scheme need to be implemented.\r\n\r\nIt should Negate a ciphertext object and return the result in ciphertext form.\n", "before_files": [{"content": "import copy\n\nfrom syft.frameworks.torch.he.fv.util.operations import poly_add_mod\nfrom syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\nfrom syft.frameworks.torch.he.fv.ciphertext import CipherText\nfrom syft.frameworks.torch.he.fv.plaintext import PlainText\n\n\nclass Evaluator:\n def __init__(self, context):\n self.context = context\n self.coeff_modulus = context.param.coeff_modulus\n self.plain_modulus = context.param.plain_modulus\n\n def add(self, op1, op2):\n \"\"\"Adds two operands using FV scheme.\n\n Args:\n op1 (Ciphertext/Plaintext): First argument.\n op2 (Ciphertext/Plaintext): Second argument.\n\n Returns:\n If both arguments are Plaintext elements then the result will be a Plaintext object\n otherwise a Ciphertext object with value equivalent to the result of addition\n operation of two provided arguments.\n \"\"\"\n if isinstance(op1, CipherText) and isinstance(op2, CipherText):\n return self._add_cipher_cipher(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, PlainText):\n return self._add_plain_plain(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, CipherText):\n return self._add_plain_cipher(op1, op2)\n\n elif isinstance(op1, CipherText) and isinstance(op2, PlainText):\n return self._add_plain_cipher(op2, op1)\n\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n\n Args:\n ct1 (Ciphertext): First argument.\n ct2 (Ciphertext): Second argument.\n\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)\n result = ct2 if len(ct2) > len(ct1) else ct1\n\n for i in range(min(len(ct1), len(ct2))):\n for j in range(len(self.coeff_modulus)):\n result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])\n\n return CipherText(result)\n\n def 
_add_plain_cipher(self, pt, ct):\n \"\"\"Adds a ciphertext and a plaintext.\n\n Args:\n pt (Plaintext): First argument.\n ct (Ciphertext): Second argument.\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct = copy.deepcopy(ct)\n return multiply_add_plain_with_delta(ct, pt, self.context)\n\n def _add_plain_plain(self, pt1, pt2):\n \"\"\"Adds two plaintexts object.\n\n Args:\n pt1 (Plaintext): First argument.\n pt2 (Plaintext): Second argument.\n\n Returns:\n A Plaintext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)\n return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))\n", "path": "syft/frameworks/torch/he/fv/evaluator.py"}]} | 1,502 | 369 |
gh_patches_debug_28241 | rasdani/github-patches | git_diff | svthalia__concrexit-2589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
Sentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)
```
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
(5 additional frame(s) were not displayed)
...
File "django/contrib/admin/sites.py", line 242, in inner
return view(request, *args, **kwargs)
File "django/utils/decorators.py", line 46, in _wrapper
return bound_method(*args, **kwargs)
File "django/utils/decorators.py", line 133, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "django/contrib/admin/options.py", line 1959, in changelist_view
response = self.response_action(
File "django/contrib/admin/options.py", line 1588, in response_action
response = func(self, request, queryset)
```
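
For reference, Django invokes admin actions with `(modeladmin, request, queryset)`, so an action defined as a `ModelAdmin` method must accept `request` in addition to the queryset. The sketch below uses placeholder class and field names, not this project's code:

```python
from django.contrib import admin

class ExampleAdmin(admin.ModelAdmin):
    actions = ["mark_finished"]

    def mark_finished(self, request, queryset):
        # Django passes the current request and the selected queryset.
        queryset.update(status="finished")

    mark_finished.short_description = "Mark selected items as finished"
```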
</issue>
<code>
[start of website/promotion/admin.py]
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from promotion.forms import PromotionRequestForm
6 from events.services import is_organiser
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and not is_organiser(request.member, obj.event):
27 return False
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 pass
62
[end of website/promotion/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -27,25 +27,25 @@
return False
return super().has_change_permission(request, obj)
- def mark_not_started(self, queryset):
+ def mark_not_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.NOT_STARTED)
mark_not_started.short_description = "Mark requests as not started"
- def mark_started(self, queryset):
+ def mark_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.STARTED)
mark_started.short_description = "Mark requests as started"
- def mark_finished(self, queryset):
+ def mark_finished(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.FINISHED)
mark_finished.short_description = "Mark requests as finished"
- def mark_published(self, queryset):
+ def mark_published(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.PUBLISHED)
| {"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -27,25 +27,25 @@\n return False\n return super().has_change_permission(request, obj)\n \n- def mark_not_started(self, queryset):\n+ def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n \n mark_not_started.short_description = \"Mark requests as not started\"\n \n- def mark_started(self, queryset):\n+ def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n \n mark_started.short_description = \"Mark requests as started\"\n \n- def mark_finished(self, queryset):\n+ def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n \n mark_finished.short_description = \"Mark requests as finished\"\n \n- def mark_published(self, queryset):\n+ def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n", "issue": "TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\nSentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)\n\n```\nTypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\n(5 additional frame(s) were not displayed)\n...\n File \"django/contrib/admin/sites.py\", line 242, in inner\n return view(request, *args, **kwargs)\n File \"django/utils/decorators.py\", line 46, in _wrapper\n return bound_method(*args, **kwargs)\n File \"django/utils/decorators.py\", line 133, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"django/contrib/admin/options.py\", line 1959, in changelist_view\n response = self.response_action(\n File \"django/contrib/admin/options.py\", line 1588, in response_action\n response = func(self, request, queryset)\n```\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom promotion.forms import PromotionRequestForm\nfrom events.services import is_organiser\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, queryset):\n \"\"\"Change the status of the 
event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}]} | 1,334 | 291 |
gh_patches_debug_13577 | rasdani/github-patches | git_diff | localstack__localstack-1397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Service "s3" not yet available, retrying...
Hello there
After installing localstack and trying to start several services on my machine, s3 always failed to start.
The command I am using to start them up is
`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`
(With `DEBUG=1` in place already for debugging)
First few lines of the output are:
```
2018-06-19T10:05:57:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:00:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:05:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:08:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:12:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:15:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:19:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 344, in check_infra
raise e
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 341, in check_infra
plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 80, in check
return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py", line 23, in check_s3
assert isinstance(out['Buckets'], list)
TypeError: 'NoneType' object is not subscriptable
During handling of the above exception, another exception occurred:
...
```
I have been trying to tackle this problem for a few hours already, without any success. I tried the latest version of localstack as well as 0.8.6.1, which works on another machine.
I am installing it through pip (`pip install localstack`).
Thanks for any help in advance!
┆Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)
</issue>
<code>
[start of localstack/services/s3/s3_starter.py]
1 import sys
2 import logging
3 import traceback
4 from moto.s3 import models as s3_models
5 from moto.server import main as moto_main
6 from localstack import config
7 from localstack.constants import DEFAULT_PORT_S3_BACKEND
8 from localstack.utils.aws import aws_stack
9 from localstack.utils.common import wait_for_port_open
10 from localstack.services.infra import (
11 get_service_protocol, start_proxy_for_service, do_run, setup_logging)
12
13 LOGGER = logging.getLogger(__name__)
14
15 # max file size for S3 objects (in MB)
16 S3_MAX_FILE_SIZE_MB = 128
17
18
19 def check_s3(expect_shutdown=False, print_error=False):
20 out = None
21 try:
22 # wait for port to be opened
23 wait_for_port_open(DEFAULT_PORT_S3_BACKEND)
24 # check S3
25 out = aws_stack.connect_to_service(service_name='s3').list_buckets()
26 except Exception as e:
27 if print_error:
28 LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))
29 if expect_shutdown:
30 assert out is None
31 else:
32 assert isinstance(out['Buckets'], list)
33
34
35 def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
36 port = port or config.PORT_S3
37 backend_port = DEFAULT_PORT_S3_BACKEND
38 cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
39 print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
40 start_proxy_for_service('s3', port, backend_port, update_listener)
41 env_vars = {'PYTHONPATH': ':'.join(sys.path)}
42 return do_run(cmd, asynchronous, env_vars=env_vars)
43
44
45 def apply_patches():
46 s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024
47
48 def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):
49 return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,
50 version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)
51
52 original_init = s3_models.FakeKey.__init__
53 s3_models.FakeKey.__init__ = init
54
55
56 def main():
57 setup_logging()
58 # patch moto implementation
59 apply_patches()
60 # start API
61 sys.exit(moto_main())
62
63
64 if __name__ == '__main__':
65 main()
66
[end of localstack/services/s3/s3_starter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py
--- a/localstack/services/s3/s3_starter.py
+++ b/localstack/services/s3/s3_starter.py
@@ -35,7 +35,7 @@
def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_S3
backend_port = DEFAULT_PORT_S3_BACKEND
- cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
+ cmd = '%s "%s" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)
print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
start_proxy_for_service('s3', port, backend_port, update_listener)
env_vars = {'PYTHONPATH': ':'.join(sys.path)}
| {"golden_diff": "diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py\n--- a/localstack/services/s3/s3_starter.py\n+++ b/localstack/services/s3/s3_starter.py\n@@ -35,7 +35,7 @@\n def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n- cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n+ cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n", "issue": "Service \"s3\" not yet available, retrying...\nHello there\n\nAfter installing localstack and trying to start several services on my machine, s3 always failed to start\nThe command I am using to start them up is\n\n`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`\n(With `DEBUG=1` in place already for debugging)\n\nFirst few lines of the output are:\n\n```\n2018-06-19T10:05:57:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:00:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:05:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:08:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:12:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:15:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:19:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 344, in check_infra\n raise e\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 341, in check_infra\n plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 80, in check\n return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py\", line 23, in check_s3\n assert isinstance(out['Buckets'], list)\nTypeError: 'NoneType' object is not subscriptable\n\nDuring handling of the above exception, another exception occurred:\n...\n```\n\nI have been trying to tackle this problem for a few hours already, without any success, I tried the latest verion of localstack as well as 0.8.6.1 which works on another machine\n\nI am installing it trough pip (`pip install localstack`)\n\nThanks for any help in advance!\n\n\n\n\u2506Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)\n\n", "before_files": [{"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, 
start_proxy_for_service, do_run, setup_logging)\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 128\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n", "path": "localstack/services/s3/s3_starter.py"}]} | 1,951 | 227 |
gh_patches_debug_6625 | rasdani/github-patches | git_diff | ray-project__ray-2784 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[xray] Users get no warning for infeasible tasks.
Start Ray with
```
RAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0
```
Then start `RAY_USE_XRAY=1 ipython` and run
```python
import ray
ray.init(redis_address='localhost:6379')
@ray.remote(num_gpus=1)
def f():
return 1
f.remote()
```
`f` will never execute because it is infeasible, and yet the user will get no warning.
</issue>
<code>
[start of python/ray/ray_constants.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 """Ray constants used in the Python code."""
5
6 import os
7
8 import ray
9
10
11 def env_integer(key, default):
12 if key in os.environ:
13 return int(os.environ[key])
14 return default
15
16
17 ID_SIZE = 20
18 NIL_JOB_ID = ray.ObjectID(ID_SIZE * b"\x00")
19
20 # If a remote function or actor (or some other export) has serialized size
21 # greater than this quantity, print an warning.
22 PICKLE_OBJECT_WARNING_SIZE = 10**7
23
24 # The maximum resource quantity that is allowed. TODO(rkn): This could be
25 # relaxed, but the current implementation of the node manager will be slower
26 # for large resource quantities due to bookkeeping of specific resource IDs.
27 MAX_RESOURCE_QUANTITY = 512
28
29 # Different types of Ray errors that can be pushed to the driver.
30 # TODO(rkn): These should be defined in flatbuffers and must be synced with
31 # the existing C++ definitions.
32 WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
33 PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
34 WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
35 TASK_PUSH_ERROR = "task"
36 REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
37 FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
38 VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
39 CHECKPOINT_PUSH_ERROR = "checkpoint"
40 REGISTER_ACTOR_PUSH_ERROR = "register_actor"
41 WORKER_CRASH_PUSH_ERROR = "worker_crash"
42 WORKER_DIED_PUSH_ERROR = "worker_died"
43 PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
44 HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
45
46 # Abort autoscaling if more than this number of errors are encountered. This
47 # is a safety feature to prevent e.g. runaway node launches.
48 AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
49
50 # The maximum number of nodes to launch in a single request.
51 # Multiple requests may be made for this batch size, up to
52 # the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
53 AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
54
55 # Max number of nodes to launch at a time.
56 AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
57 "AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
58
59 # Interval at which to perform autoscaling updates.
60 AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
61
62 # The autoscaler will attempt to restart Ray on nodes it hasn't heard from
63 # in more than this interval.
64 AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
65 30)
66
67 # Max number of retries to AWS (default is 5, time increases exponentially)
68 BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
69
70 # Default logger format: only contains the message.
71 LOGGER_FORMAT = "%(message)s"
72 LOGGER_FORMAT_HELP = "The logging format. default='%(message)s'"
73 LOGGER_LEVEL = "info"
74 LOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']
75 LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
76 " 'warning', 'error', 'critical'], default='info'")
77
[end of python/ray/ray_constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -42,6 +42,7 @@
WORKER_DIED_PUSH_ERROR = "worker_died"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
+INFEASIBLE_TASK_ERROR = "infeasible_task"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
| {"golden_diff": "diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py\n--- a/python/ray/ray_constants.py\n+++ b/python/ray/ray_constants.py\n@@ -42,6 +42,7 @@\n WORKER_DIED_PUSH_ERROR = \"worker_died\"\n PUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\n HASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n+INFEASIBLE_TASK_ERROR = \"infeasible_task\"\n \n # Abort autoscaling if more than this number of errors are encountered. This\n # is a safety feature to prevent e.g. runaway node launches.\n", "issue": "[xray] Users get no warning for infeasible tasks.\nStart Ray with \r\n\r\n```\r\nRAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0\r\n```\r\n\r\nThen start `RAY_USE_XRAY=1 ipython` and run\r\n\r\n```python\r\nimport ray\r\n\r\nray.init(redis_address='localhost:6379')\r\n\r\[email protected](num_gpus=1)\r\ndef f():\r\n return 1\r\n\r\nf.remote()\r\n```\r\n\r\n`f` will never execute because it is infeasible, and yet the user will get no warning.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"Ray constants used in the Python code.\"\"\"\n\nimport os\n\nimport ray\n\n\ndef env_integer(key, default):\n if key in os.environ:\n return int(os.environ[key])\n return default\n\n\nID_SIZE = 20\nNIL_JOB_ID = ray.ObjectID(ID_SIZE * b\"\\x00\")\n\n# If a remote function or actor (or some other export) has serialized size\n# greater than this quantity, print an warning.\nPICKLE_OBJECT_WARNING_SIZE = 10**7\n\n# The maximum resource quantity that is allowed. TODO(rkn): This could be\n# relaxed, but the current implementation of the node manager will be slower\n# for large resource quantities due to bookkeeping of specific resource IDs.\nMAX_RESOURCE_QUANTITY = 512\n\n# Different types of Ray errors that can be pushed to the driver.\n# TODO(rkn): These should be defined in flatbuffers and must be synced with\n# the existing C++ definitions.\nWAIT_FOR_CLASS_PUSH_ERROR = \"wait_for_class\"\nPICKLING_LARGE_OBJECT_PUSH_ERROR = \"pickling_large_object\"\nWAIT_FOR_FUNCTION_PUSH_ERROR = \"wait_for_function\"\nTASK_PUSH_ERROR = \"task\"\nREGISTER_REMOTE_FUNCTION_PUSH_ERROR = \"register_remote_function\"\nFUNCTION_TO_RUN_PUSH_ERROR = \"function_to_run\"\nVERSION_MISMATCH_PUSH_ERROR = \"version_mismatch\"\nCHECKPOINT_PUSH_ERROR = \"checkpoint\"\nREGISTER_ACTOR_PUSH_ERROR = \"register_actor\"\nWORKER_CRASH_PUSH_ERROR = \"worker_crash\"\nWORKER_DIED_PUSH_ERROR = \"worker_died\"\nPUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\nHASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n\n# Abort autoscaling if more than this number of errors are encountered. This\n# is a safety feature to prevent e.g. 
runaway node launches.\nAUTOSCALER_MAX_NUM_FAILURES = env_integer(\"AUTOSCALER_MAX_NUM_FAILURES\", 5)\n\n# The maximum number of nodes to launch in a single request.\n# Multiple requests may be made for this batch size, up to\n# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.\nAUTOSCALER_MAX_LAUNCH_BATCH = env_integer(\"AUTOSCALER_MAX_LAUNCH_BATCH\", 5)\n\n# Max number of nodes to launch at a time.\nAUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(\n \"AUTOSCALER_MAX_CONCURRENT_LAUNCHES\", 10)\n\n# Interval at which to perform autoscaling updates.\nAUTOSCALER_UPDATE_INTERVAL_S = env_integer(\"AUTOSCALER_UPDATE_INTERVAL_S\", 5)\n\n# The autoscaler will attempt to restart Ray on nodes it hasn't heard from\n# in more than this interval.\nAUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer(\"AUTOSCALER_HEARTBEAT_TIMEOUT_S\",\n 30)\n\n# Max number of retries to AWS (default is 5, time increases exponentially)\nBOTO_MAX_RETRIES = env_integer(\"BOTO_MAX_RETRIES\", 12)\n\n# Default logger format: only contains the message.\nLOGGER_FORMAT = \"%(message)s\"\nLOGGER_FORMAT_HELP = \"The logging format. default='%(message)s'\"\nLOGGER_LEVEL = \"info\"\nLOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']\nLOGGER_LEVEL_HELP = (\"The logging level threshold, choices=['debug', 'info',\"\n \" 'warning', 'error', 'critical'], default='info'\")\n", "path": "python/ray/ray_constants.py"}]} | 1,560 | 134 |
gh_patches_debug_17784 | rasdani/github-patches | git_diff | lmfit__lmfit-py-150 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes
Due to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`
I discovered this issue while trying to fix #149
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # from distutils.core import setup
3 from setuptools import setup
4
5 import lmfit as lmfit
6 import numpy, scipy
7
8 long_desc = """A library for least-squares minimization and data fitting in
9 Python. Built on top of scipy.optimize, lmfit provides a Parameter object
10 which can be set as fixed or free, can have upper and/or lower bounds, or
11 can be written in terms of algebraic constraints of other Parameters. The
12 user writes a function to be minimized as a function of these Parameters,
13 and the scipy.optimize methods are used to find the optimal values for the
14 Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
15 algorithm, and provides estimated standard errors and correlations between
16 varied Parameters. Other minimization methods, including Nelder-Mead's
17 downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
18 others are also supported. Bounds and contraints can be placed on
19 Parameters for all of these methods.
20
21 In addition, methods for explicitly calculating confidence intervals are
22 provided for exploring minmization problems where the approximation of
23 estimating Parameter uncertainties from the covariance matrix is
24 questionable. """
25
26
27 setup(name = 'lmfit',
28 version = lmfit.__version__,
29 author = 'LMFit Development Team',
30 author_email = '[email protected]',
31 url = 'http://lmfit.github.io/lmfit-py/',
32 download_url = 'http://lmfit.github.io//lmfit-py/',
33 requires = ('numpy', 'scipy'),
34 license = 'BSD',
35 description = "Least-Squares Minimization with Bounds and Constraints",
36 long_description = long_desc,
37 platforms = ('Windows', 'Linux', 'Mac OS X'),
38 classifiers=['Intended Audience :: Science/Research',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python',
41 'Topic :: Scientific/Engineering',
42 ],
43 # test_suite='nose.collector',
44 # test_requires=['Nose'],
45 package_dir = {'lmfit': 'lmfit'},
46 packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
47 )
48
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,11 @@
author_email = '[email protected]',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- requires = ('numpy', 'scipy'),
+ requires = ['numpy', 'scipy'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
- platforms = ('Windows', 'Linux', 'Mac OS X'),
+ platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,11 @@\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n- requires = ('numpy', 'scipy'),\n+ requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n- platforms = ('Windows', 'Linux', 'Mac OS X'),\n+ platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n", "issue": "Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes\nDue to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`\n\nI discovered this issue while trying to fix #149 \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ('numpy', 'scipy'),\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ('Windows', 'Linux', 'Mac OS X'),\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}]} | 1,152 | 186 |
gh_patches_debug_22010 | rasdani/github-patches | git_diff | ckan__ckan-561 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Postgresql 8.4 error when running paster db init
When running the paster db init command with the CKAN 2.0 beta, there is an error encountered that appears to be related to use of the left() string function in ckan/migration/versions/067_turn_extras_to_strings.py. According to the documentation and my own simple test, this function is not support in Postgresql 8.4. For a stack trace, see: https://gist.github.com/thriuin/5067819.
Is there a new minimum version of Postgresql required -- documentation still says 8.4 which unfortunately is what comes with RedHat Enterprise.
</issue>
<code>
[start of ckan/migration/versions/067_turn_extras_to_strings.py]
1 import json
2
3 def upgrade(migrate_engine):
4
5 with migrate_engine.begin() as connection:
6 tables = 'package_extra group_extra'
7 revision_tables = 'package_extra_revision group_extra_revision'
8
9 for table in tables.split():
10 sql = """select id, value from {table} where left(value,1) = '"' """.format(table=table)
11 results = connection.execute(sql)
12 for result in results:
13 id, value = result
14 update_sql = 'update {table} set value = %s where id = %s'
15 connection.execute(update_sql.format(table=table),
16 json.loads(value), id)
17
18 for table in revision_tables.split():
19 sql = """select id, revision_id, value from {table} where left(value,1) = '"' """.format(table=table)
20
21 results = connection.execute(sql)
22 for result in results:
23 id, revision_id, value = result
24 update_sql = 'update {table} set value = %s where id = %s and revision_id = %s'
25 connection.execute(update_sql.format(table=table),
26 json.loads(value), id, revision_id)
27
28
29
[end of ckan/migration/versions/067_turn_extras_to_strings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/migration/versions/067_turn_extras_to_strings.py b/ckan/migration/versions/067_turn_extras_to_strings.py
--- a/ckan/migration/versions/067_turn_extras_to_strings.py
+++ b/ckan/migration/versions/067_turn_extras_to_strings.py
@@ -7,7 +7,7 @@
revision_tables = 'package_extra_revision group_extra_revision'
for table in tables.split():
- sql = """select id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
id, value = result
@@ -16,7 +16,7 @@
json.loads(value), id)
for table in revision_tables.split():
- sql = """select id, revision_id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, revision_id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
| {"golden_diff": "diff --git a/ckan/migration/versions/067_turn_extras_to_strings.py b/ckan/migration/versions/067_turn_extras_to_strings.py\n--- a/ckan/migration/versions/067_turn_extras_to_strings.py\n+++ b/ckan/migration/versions/067_turn_extras_to_strings.py\n@@ -7,7 +7,7 @@\n revision_tables = 'package_extra_revision group_extra_revision'\n \n for table in tables.split():\n- sql = \"\"\"select id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n+ sql = \"\"\"select id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n results = connection.execute(sql)\n for result in results:\n id, value = result\n@@ -16,7 +16,7 @@\n json.loads(value), id)\n \n for table in revision_tables.split():\n- sql = \"\"\"select id, revision_id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n+ sql = \"\"\"select id, revision_id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n \n results = connection.execute(sql)\n for result in results:\n", "issue": "Postgresql 8.4 error when running paster db init\nWhen running the paster db init command with the CKAN 2.0 beta, there is an error encountered that appears to be related to use of the left() string function in ckan/migration/versions/067_turn_extras_to_strings.py. According to the documentation and my own simple test, this function is not support in Postgresql 8.4. For a stack trace, see: https://gist.github.com/thriuin/5067819.\n\nIs there a new minimum version of Postgresql required -- documentation still says 8.4 which unfortunately is what comes with RedHat Enterprise.\n\n", "before_files": [{"content": "import json\n\ndef upgrade(migrate_engine):\n\n with migrate_engine.begin() as connection:\n tables = 'package_extra group_extra'\n revision_tables = 'package_extra_revision group_extra_revision'\n\n for table in tables.split():\n sql = \"\"\"select id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n results = connection.execute(sql)\n for result in results:\n id, value = result\n update_sql = 'update {table} set value = %s where id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id)\n\n for table in revision_tables.split():\n sql = \"\"\"select id, revision_id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n\n results = connection.execute(sql)\n for result in results:\n id, revision_id, value = result\n update_sql = 'update {table} set value = %s where id = %s and revision_id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id, revision_id)\n\n\n", "path": "ckan/migration/versions/067_turn_extras_to_strings.py"}]} | 992 | 291 |
gh_patches_debug_5351 | rasdani/github-patches | git_diff | coala__coala-2795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make exception tracebacks default
Instead of asking the user to run coala with `-L DEBUG`
</issue>
<code>
[start of coalib/output/printers/LogPrinter.py]
1 import traceback
2
3 from pyprint.ColorPrinter import ColorPrinter
4
5 from coalib.output.printers.LOG_LEVEL import LOG_LEVEL, LOG_LEVEL_COLORS
6 from coalib.processes.communication.LogMessage import LogMessage
7
8
9 class LogPrinter:
10 """
11 The LogPrinter class allows to print log messages to an underlying Printer.
12
13 This class is an adapter, means you can create a LogPrinter from every
14 existing Printer instance.
15 """
16
17 def __init__(self,
18 printer,
19 log_level=LOG_LEVEL.INFO,
20 timestamp_format="%X"):
21 """
22 Creates a new log printer from an existing Printer.
23
24 :param printer: The underlying Printer where log messages
25 shall be written to. If you inherit from
26 LogPrinter, set it to self.
27 :param log_level: The minimum log level, everything below will
28 not be logged.
29 :param timestamp_format: The format string for the
30 datetime.today().strftime(format) method.
31 """
32 self._printer = printer
33 self.log_level = log_level
34 self.timestamp_format = timestamp_format
35
36 @property
37 def printer(self):
38 """
39 Returns the underlying printer where logs are printed to.
40 """
41 return self._printer
42
43 def _get_log_prefix(self, log_level, timestamp):
44 datetime_string = timestamp.strftime(self.timestamp_format)
45
46 if datetime_string != "":
47 datetime_string = "[" + datetime_string + "]"
48
49 return '[{}]{}'.format(LOG_LEVEL.reverse.get(log_level, "ERROR"),
50 datetime_string)
51
52 def debug(self, *messages, delimiter=" ", timestamp=None, **kwargs):
53 self.log_message(LogMessage(LOG_LEVEL.DEBUG,
54 *messages,
55 delimiter=delimiter,
56 timestamp=timestamp),
57 **kwargs)
58
59 def info(self, *messages, delimiter=" ", timestamp=None, **kwargs):
60 self.log_message(LogMessage(LOG_LEVEL.INFO,
61 *messages,
62 delimiter=delimiter,
63 timestamp=timestamp),
64 **kwargs)
65
66 def warn(self, *messages, delimiter=" ", timestamp=None, **kwargs):
67 self.log_message(LogMessage(LOG_LEVEL.WARNING,
68 *messages,
69 delimiter=delimiter,
70 timestamp=timestamp),
71 **kwargs)
72
73 def err(self, *messages, delimiter=" ", timestamp=None, **kwargs):
74 self.log_message(LogMessage(LOG_LEVEL.ERROR,
75 *messages,
76 delimiter=delimiter,
77 timestamp=timestamp),
78 **kwargs)
79
80 def log(self, log_level, message, timestamp=None, **kwargs):
81 self.log_message(LogMessage(log_level,
82 message,
83 timestamp=timestamp),
84 **kwargs)
85
86 def log_exception(self,
87 message,
88 exception,
89 log_level=LOG_LEVEL.ERROR,
90 timestamp=None,
91 **kwargs):
92 """
93 If the log_level of the printer is greater than DEBUG, it prints
94 only the message. If it is DEBUG or lower, it shows the message
95 along with the traceback of the exception.
96
97 :param message: The message to print.
98 :param exception: The exception to print.
99 :param log_level: The log_level of this message (not used when
100 logging the traceback. Tracebacks always have
101 a level of DEBUG).
102 :param timestamp: The time at which this log occurred. Defaults to
103 the current time.
104 :param kwargs: Keyword arguments to be passed when logging the
105 message (not used when logging the traceback).
106 """
107 if not isinstance(exception, BaseException):
108 raise TypeError("log_exception can only log derivatives of "
109 "BaseException.")
110
111 traceback_str = "\n".join(
112 traceback.format_exception(type(exception),
113 exception,
114 exception.__traceback__))
115
116 self.log(log_level, message, timestamp=timestamp, **kwargs)
117 self.log_message(
118 LogMessage(LOG_LEVEL.DEBUG,
119 "Exception was:" + "\n" + traceback_str,
120 timestamp=timestamp),
121 **kwargs)
122
123 def log_message(self, log_message, **kwargs):
124 if not isinstance(log_message, LogMessage):
125 raise TypeError("log_message should be of type LogMessage.")
126
127 if log_message.log_level < self.log_level:
128 return
129
130 self._print_log_message(
131 self._get_log_prefix(log_message.log_level, log_message.timestamp),
132 log_message,
133 **kwargs)
134
135 def _print_log_message(self, prefix, log_message, **kwargs):
136 """
137 Override this if you want to influence how the log message is printed.
138
139 If the underlying printer is a ColorPrinter, then colored logging is
140 used. You can turn it off in the underlying ColorPrinter if you want to
141 print uncolored.
142
143 :param prefix: The prefix to print (as string).
144 :param log_message: The LogMessage object to print.
145 :param kwargs: Any other keyword arguments.
146 """
147 if isinstance(self._printer, ColorPrinter):
148 self.printer.print(prefix,
149 end=" ",
150 color=LOG_LEVEL_COLORS[log_message.log_level],
151 **kwargs)
152 self.printer.print(log_message.message, **kwargs)
153 else:
154 self.printer.print(prefix, log_message.message, **kwargs)
155
[end of coalib/output/printers/LogPrinter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py
--- a/coalib/output/printers/LogPrinter.py
+++ b/coalib/output/printers/LogPrinter.py
@@ -115,7 +115,7 @@
self.log(log_level, message, timestamp=timestamp, **kwargs)
self.log_message(
- LogMessage(LOG_LEVEL.DEBUG,
+ LogMessage(LOG_LEVEL.INFO,
"Exception was:" + "\n" + traceback_str,
timestamp=timestamp),
**kwargs)
| {"golden_diff": "diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py\n--- a/coalib/output/printers/LogPrinter.py\n+++ b/coalib/output/printers/LogPrinter.py\n@@ -115,7 +115,7 @@\n \n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n- LogMessage(LOG_LEVEL.DEBUG,\n+ LogMessage(LOG_LEVEL.INFO,\n \"Exception was:\" + \"\\n\" + traceback_str,\n timestamp=timestamp),\n **kwargs)\n", "issue": "Make exception tracebacks default\nInstead of asking the user to run coala with `-L DEBUG`\n\n", "before_files": [{"content": "import traceback\n\nfrom pyprint.ColorPrinter import ColorPrinter\n\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL, LOG_LEVEL_COLORS\nfrom coalib.processes.communication.LogMessage import LogMessage\n\n\nclass LogPrinter:\n \"\"\"\n The LogPrinter class allows to print log messages to an underlying Printer.\n\n This class is an adapter, means you can create a LogPrinter from every\n existing Printer instance.\n \"\"\"\n\n def __init__(self,\n printer,\n log_level=LOG_LEVEL.INFO,\n timestamp_format=\"%X\"):\n \"\"\"\n Creates a new log printer from an existing Printer.\n\n :param printer: The underlying Printer where log messages\n shall be written to. If you inherit from\n LogPrinter, set it to self.\n :param log_level: The minimum log level, everything below will\n not be logged.\n :param timestamp_format: The format string for the\n datetime.today().strftime(format) method.\n \"\"\"\n self._printer = printer\n self.log_level = log_level\n self.timestamp_format = timestamp_format\n\n @property\n def printer(self):\n \"\"\"\n Returns the underlying printer where logs are printed to.\n \"\"\"\n return self._printer\n\n def _get_log_prefix(self, log_level, timestamp):\n datetime_string = timestamp.strftime(self.timestamp_format)\n\n if datetime_string != \"\":\n datetime_string = \"[\" + datetime_string + \"]\"\n\n return '[{}]{}'.format(LOG_LEVEL.reverse.get(log_level, \"ERROR\"),\n datetime_string)\n\n def debug(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.DEBUG,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def info(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.INFO,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def warn(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.WARNING,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def err(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.ERROR,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def log(self, log_level, message, timestamp=None, **kwargs):\n self.log_message(LogMessage(log_level,\n message,\n timestamp=timestamp),\n **kwargs)\n\n def log_exception(self,\n message,\n exception,\n log_level=LOG_LEVEL.ERROR,\n timestamp=None,\n **kwargs):\n \"\"\"\n If the log_level of the printer is greater than DEBUG, it prints\n only the message. If it is DEBUG or lower, it shows the message\n along with the traceback of the exception.\n\n :param message: The message to print.\n :param exception: The exception to print.\n :param log_level: The log_level of this message (not used when\n logging the traceback. Tracebacks always have\n a level of DEBUG).\n :param timestamp: The time at which this log occurred. 
Defaults to\n the current time.\n :param kwargs: Keyword arguments to be passed when logging the\n message (not used when logging the traceback).\n \"\"\"\n if not isinstance(exception, BaseException):\n raise TypeError(\"log_exception can only log derivatives of \"\n \"BaseException.\")\n\n traceback_str = \"\\n\".join(\n traceback.format_exception(type(exception),\n exception,\n exception.__traceback__))\n\n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n LogMessage(LOG_LEVEL.DEBUG,\n \"Exception was:\" + \"\\n\" + traceback_str,\n timestamp=timestamp),\n **kwargs)\n\n def log_message(self, log_message, **kwargs):\n if not isinstance(log_message, LogMessage):\n raise TypeError(\"log_message should be of type LogMessage.\")\n\n if log_message.log_level < self.log_level:\n return\n\n self._print_log_message(\n self._get_log_prefix(log_message.log_level, log_message.timestamp),\n log_message,\n **kwargs)\n\n def _print_log_message(self, prefix, log_message, **kwargs):\n \"\"\"\n Override this if you want to influence how the log message is printed.\n\n If the underlying printer is a ColorPrinter, then colored logging is\n used. You can turn it off in the underlying ColorPrinter if you want to\n print uncolored.\n\n :param prefix: The prefix to print (as string).\n :param log_message: The LogMessage object to print.\n :param kwargs: Any other keyword arguments.\n \"\"\"\n if isinstance(self._printer, ColorPrinter):\n self.printer.print(prefix,\n end=\" \",\n color=LOG_LEVEL_COLORS[log_message.log_level],\n **kwargs)\n self.printer.print(log_message.message, **kwargs)\n else:\n self.printer.print(prefix, log_message.message, **kwargs)\n", "path": "coalib/output/printers/LogPrinter.py"}]} | 2,008 | 126 |
gh_patches_debug_21305 | rasdani/github-patches | git_diff | pre-commit__pre-commit-335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Latest virtualenv breaks pre-commit
See also #299
Failure looks like:
```
17:00:19 hookid: sort-simple-yaml
17:00:19
17:00:19 bash: /nail/home/push/.pre-commit/reposkzFrD//tmp/tmp.cEk6TCoZOS/srv-configs/py_env-default/bin/activate: No such file or directory
```
```
$ pip install virtualenv --upgrade
Downloading/unpacking virtualenv
Downloading virtualenv-14.0.0-py2.py3-none-any.whl (1.8MB): 1.8MB downloaded
Installing collected packages: virtualenv
Successfully installed virtualenv
Cleaning up...
$ python
Python 2.6.7 (r267:88850, Dec 2 2011, 20:27:26)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import virtualenv
>>> virtualenv.path_locations('foo')
('/nail/home/asottile/foo', '/nail/home/asottile/foo/lib/python2.6', '/nail/home/asottile/foo/include/python2.6', '/nail/home/asottile/foo/bin')
>>>
$ pip install virtualenv==1.11.5
Downloading/unpacking virtualenv==1.11.5
Downloading virtualenv-1.11.5.tar.gz (1.8MB): 1.8MB downloaded
Running setup.py (path:/nail/home/asottile/venv/build/virtualenv/setup.py) egg_info for package virtualenv
warning: no previously-included files matching '*' found under directory 'docs/_templates'
warning: no previously-included files matching '*' found under directory 'docs/_build'
Installing collected packages: virtualenv
Found existing installation: virtualenv 14.0.0
Uninstalling virtualenv:
Successfully uninstalled virtualenv
Running setup.py install for virtualenv
warning: no previously-included files matching '*' found under directory 'docs/_templates'
warning: no previously-included files matching '*' found under directory 'docs/_build'
Installing virtualenv script to /nail/home/asottile/venv/bin
Installing virtualenv-2.6 script to /nail/home/asottile/venv/bin
Successfully installed virtualenv
Cleaning up...
$ python
Python 2.6.7 (r267:88850, Dec 2 2011, 20:27:26)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import virtualenv
>>> virtualenv.path_locations('foo')
('foo', 'foo/lib/python2.6', 'foo/include/python2.6', 'foo/bin')
>>>
```
</issue>
<code>
[start of pre_commit/languages/python.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import distutils.spawn
5 import os
6 import sys
7
8 import virtualenv
9
10 from pre_commit.languages import helpers
11 from pre_commit.util import clean_path_on_failure
12 from pre_commit.util import shell_escape
13
14
15 ENVIRONMENT_DIR = 'py_env'
16
17
18 class PythonEnv(helpers.Environment):
19 @property
20 def env_prefix(self):
21 return ". '{{prefix}}{0}activate' &&".format(
22 virtualenv.path_locations(
23 helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
24 )[-1].rstrip(os.sep) + os.sep,
25 )
26
27
28 @contextlib.contextmanager
29 def in_env(repo_cmd_runner, language_version):
30 yield PythonEnv(repo_cmd_runner, language_version)
31
32
33 def norm_version(version):
34 if os.name == 'nt': # pragma: no cover (windows)
35 # Try looking up by name
36 if distutils.spawn.find_executable(version):
37 return version
38
39 # If it is in the form pythonx.x search in the default
40 # place on windows
41 if version.startswith('python'):
42 return r'C:\{0}\python.exe'.format(version.replace('.', ''))
43
44 # Otherwise assume it is a path
45 return os.path.expanduser(version)
46
47
48 def install_environment(
49 repo_cmd_runner,
50 version='default',
51 additional_dependencies=None,
52 ):
53 assert repo_cmd_runner.exists('setup.py')
54 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
55
56 # Install a virtualenv
57 with clean_path_on_failure(repo_cmd_runner.path(directory)):
58 venv_cmd = [
59 sys.executable, '-m', 'virtualenv',
60 '{{prefix}}{0}'.format(directory)
61 ]
62 if version != 'default':
63 venv_cmd.extend(['-p', norm_version(version)])
64 repo_cmd_runner.run(venv_cmd)
65 with in_env(repo_cmd_runner, version) as env:
66 env.run("cd '{prefix}' && pip install .", encoding=None)
67 if additional_dependencies:
68 env.run(
69 "cd '{prefix}' && pip install " +
70 ' '.join(
71 shell_escape(dep) for dep in additional_dependencies
72 ),
73 encoding=None,
74 )
75
76
77 def run_hook(repo_cmd_runner, hook, file_args):
78 with in_env(repo_cmd_runner, hook['language_version']) as env:
79 return helpers.run_hook(env, hook, file_args)
80
[end of pre_commit/languages/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -5,8 +5,6 @@
import os
import sys
-import virtualenv
-
from pre_commit.languages import helpers
from pre_commit.util import clean_path_on_failure
from pre_commit.util import shell_escape
@@ -15,13 +13,22 @@
ENVIRONMENT_DIR = 'py_env'
+def bin_dir(venv):
+ """On windows there's a different directory for the virtualenv"""
+ if os.name == 'nt': # pragma: no cover (windows)
+ return os.path.join(venv, 'Scripts')
+ else:
+ return os.path.join(venv, 'bin')
+
+
class PythonEnv(helpers.Environment):
@property
def env_prefix(self):
- return ". '{{prefix}}{0}activate' &&".format(
- virtualenv.path_locations(
+ return ". '{{prefix}}{0}{1}activate' &&".format(
+ bin_dir(
helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
- )[-1].rstrip(os.sep) + os.sep,
+ ),
+ os.sep,
)
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -5,8 +5,6 @@\n import os\n import sys\n \n-import virtualenv\n-\n from pre_commit.languages import helpers\n from pre_commit.util import clean_path_on_failure\n from pre_commit.util import shell_escape\n@@ -15,13 +13,22 @@\n ENVIRONMENT_DIR = 'py_env'\n \n \n+def bin_dir(venv):\n+ \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n+ if os.name == 'nt': # pragma: no cover (windows)\n+ return os.path.join(venv, 'Scripts')\n+ else:\n+ return os.path.join(venv, 'bin')\n+\n+\n class PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n- return \". '{{prefix}}{0}activate' &&\".format(\n- virtualenv.path_locations(\n+ return \". '{{prefix}}{0}{1}activate' &&\".format(\n+ bin_dir(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n- )[-1].rstrip(os.sep) + os.sep,\n+ ),\n+ os.sep,\n )\n", "issue": "Latest virtualenv breaks pre-commit\nSee also #299 \n\nFailure looks like:\n\n```\n17:00:19 hookid: sort-simple-yaml\n17:00:19 \n17:00:19 bash: /nail/home/push/.pre-commit/reposkzFrD//tmp/tmp.cEk6TCoZOS/srv-configs/py_env-default/bin/activate: No such file or directory\n```\n\n```\n$ pip install virtualenv --upgrade\nDownloading/unpacking virtualenv\n Downloading virtualenv-14.0.0-py2.py3-none-any.whl (1.8MB): 1.8MB downloaded\nInstalling collected packages: virtualenv\nSuccessfully installed virtualenv\nCleaning up...\n$ python\nPython 2.6.7 (r267:88850, Dec 2 2011, 20:27:26) \n[GCC 4.4.3] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import virtualenv\n>>> virtualenv.path_locations('foo')\n('/nail/home/asottile/foo', '/nail/home/asottile/foo/lib/python2.6', '/nail/home/asottile/foo/include/python2.6', '/nail/home/asottile/foo/bin')\n>>> \n$ pip install virtualenv==1.11.5\nDownloading/unpacking virtualenv==1.11.5\n Downloading virtualenv-1.11.5.tar.gz (1.8MB): 1.8MB downloaded\n Running setup.py (path:/nail/home/asottile/venv/build/virtualenv/setup.py) egg_info for package virtualenv\n warning: no previously-included files matching '*' found under directory 'docs/_templates'\n warning: no previously-included files matching '*' found under directory 'docs/_build'\nInstalling collected packages: virtualenv\n Found existing installation: virtualenv 14.0.0\n Uninstalling virtualenv:\n Successfully uninstalled virtualenv\n Running setup.py install for virtualenv\n warning: no previously-included files matching '*' found under directory 'docs/_templates'\n warning: no previously-included files matching '*' found under directory 'docs/_build'\n Installing virtualenv script to /nail/home/asottile/venv/bin\n Installing virtualenv-2.6 script to /nail/home/asottile/venv/bin\nSuccessfully installed virtualenv\nCleaning up...\n$ python\nPython 2.6.7 (r267:88850, Dec 2 2011, 20:27:26) \n[GCC 4.4.3] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import virtualenv\n>>> virtualenv.path_locations('foo')\n('foo', 'foo/lib/python2.6', 'foo/include/python2.6', 'foo/bin')\n>>>\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import shell_escape\n\n\nENVIRONMENT_DIR = 
'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n )[-1].rstrip(os.sep) + os.sep,\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n yield PythonEnv(repo_cmd_runner, language_version)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=None,\n):\n assert repo_cmd_runner.exists('setup.py')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version) as env:\n env.run(\"cd '{prefix}' && pip install .\", encoding=None)\n if additional_dependencies:\n env.run(\n \"cd '{prefix}' && pip install \" +\n ' '.join(\n shell_escape(dep) for dep in additional_dependencies\n ),\n encoding=None,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}]} | 1,863 | 279 |