problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
---|---|---|---|---|---|---|---|---|
stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
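The rows below are a raw preview of the dataset described by this schema. As a minimal sketch, the data could be loaded with the Hugging Face `datasets` library; the repository ID is taken from the `source` column of the preview and the `"train"` split name is an assumption, so both may need adjusting to match where the dataset is actually hosted.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub
# under the ID shown in the `source` column and exposes a "train" split
# (both are assumptions, not confirmed by this preview).
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])         # e.g. "gh_patches_debug_37530"
print(row["in_source_id"])       # upstream repo/issue identifier, e.g. "keras-team__autokeras-286"
print(row["prompt"][:300])       # issue text + partial code base + patch-format instructions
print(row["golden_diff"][:300])  # reference patch used for verification
```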
gh_patches_debug_37530 | rasdani/github-patches | git_diff | keras-team__autokeras-286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a class to generate an MLP
<!---
Please label your issue with `new_task_module`.
-->
### Suggested Name
<!---
-->
MlpGenerator
### Task Description
<!---
A clear and concise description of the machine learning task to be added, its problem statement and learning outcome.
-->
Add a class named MlpGenerator. Create a superclass that would be inherited by CnnGenerator and MlpGenerator.
</issue>
<code>
[start of autokeras/constant.py]
1 class Constant:
2 # Data
3
4 VALIDATION_SET_SIZE = 0.08333
5
6 # Searcher
7
8 MAX_MODEL_NUM = 1000
9 BETA = 2.576
10 KERNEL_LAMBDA = 0.1
11 T_MIN = 0.0001
12 N_NEIGHBOURS = 8
13 MAX_MODEL_SIZE = (1 << 25)
14 MAX_LAYER_WIDTH = 4096
15 MAX_LAYERS = 100
16
17 # Model Defaults
18
19 DENSE_DROPOUT_RATE = 0.5
20 CONV_DROPOUT_RATE = 0.25
21 CONV_BLOCK_DISTANCE = 2
22 DENSE_BLOCK_DISTANCE = 1
23 MODEL_LEN = 3
24 MODEL_WIDTH = 64
25
26 # ModelTrainer
27
28 DATA_AUGMENTATION = True
29 MAX_ITER_NUM = 200
30 MIN_LOSS_DEC = 1e-4
31 MAX_NO_IMPROVEMENT_NUM = 5
32 MAX_BATCH_SIZE = 128
33 LIMIT_MEMORY = False
34 SEARCH_MAX_ITER = 200
35
36 # text preprocessor
37
38 EMBEDDING_DIM = 100
39 MAX_SEQUENCE_LENGTH = 400
40 MAX_NB_WORDS = 5000
41 EXTRACT_PATH = "glove/"
42 # Download file name
43 FILE_PATH = "glove.zip"
44 PRE_TRAIN_FILE_LINK = "http://nlp.stanford.edu/data/glove.6B.zip"
45 PRE_TRAIN_FILE_NAME = "glove.6B.100d.txt"
46
[end of autokeras/constant.py]
[start of autokeras/nn/generator.py]
1 from autokeras.constant import Constant
2 from autokeras.nn.graph import Graph
3 from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \
4 StubReLU, StubGlobalPooling
5
6
7 class CnnGenerator:
8 def __init__(self, n_output_node, input_shape):
9 self.n_output_node = n_output_node
10 self.input_shape = input_shape
11 if len(self.input_shape) > 4:
12 raise ValueError('The input dimension is too high.')
13 if len(self.input_shape) < 2:
14 raise ValueError('The input dimension is too low.')
15
16 def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):
17 pooling_len = int(model_len / 4)
18 graph = Graph(self.input_shape, False)
19 temp_input_channel = self.input_shape[-1]
20 output_node_id = 0
21 for i in range(model_len):
22 output_node_id = graph.add_layer(StubReLU(), output_node_id)
23 output_node_id = graph.add_layer(StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)
24 output_node_id = graph.add_layer(StubBatchNormalization(model_width), output_node_id)
25 temp_input_channel = model_width
26 if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
27 output_node_id = graph.add_layer(StubPooling(), output_node_id)
28
29 output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)
30 output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)
31 output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),
32 output_node_id)
33 output_node_id = graph.add_layer(StubReLU(), output_node_id)
34 graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
35 return graph
36
[end of autokeras/nn/generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autokeras/constant.py b/autokeras/constant.py
--- a/autokeras/constant.py
+++ b/autokeras/constant.py
@@ -18,9 +18,12 @@
DENSE_DROPOUT_RATE = 0.5
CONV_DROPOUT_RATE = 0.25
+ MLP_DROPOUT_RATE = 0.25
CONV_BLOCK_DISTANCE = 2
DENSE_BLOCK_DISTANCE = 1
MODEL_LEN = 3
+ MLP_MODEL_LEN = 3
+ MLP_MODEL_WIDTH = 5
MODEL_WIDTH = 64
# ModelTrainer
diff --git a/autokeras/nn/generator.py b/autokeras/nn/generator.py
--- a/autokeras/nn/generator.py
+++ b/autokeras/nn/generator.py
@@ -2,12 +2,22 @@
from autokeras.nn.graph import Graph
from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \
StubReLU, StubGlobalPooling
+from abc import abstractmethod
-class CnnGenerator:
+class NetworkGenerator:
def __init__(self, n_output_node, input_shape):
self.n_output_node = n_output_node
self.input_shape = input_shape
+
+ @abstractmethod
+ def generate(self, model_len, model_width):
+ pass
+
+
+class CnnGenerator(NetworkGenerator):
+ def __init__(self, n_output_node, input_shape):
+ super(CnnGenerator, self).__init__(n_output_node, input_shape)
if len(self.input_shape) > 4:
raise ValueError('The input dimension is too high.')
if len(self.input_shape) < 2:
@@ -33,3 +43,28 @@
output_node_id = graph.add_layer(StubReLU(), output_node_id)
graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
return graph
+
+
+class MlpGenerator(NetworkGenerator):
+ def __init__(self, n_output_node, input_shape):
+ super(MlpGenerator, self).__init__(n_output_node, input_shape)
+ if len(self.input_shape) > 1:
+ raise ValueError('The input dimension is too high.')
+
+ def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):
+ if type(model_width) is list and not len(model_width) == model_len:
+ raise ValueError('The length of \'model_width\' does not match \'model_len\'')
+ elif type(model_width) is int:
+ model_width = [model_width] * model_len
+
+ graph = Graph(self.input_shape[0], False)
+ output_node_id = 0
+ n_nodes_prev_layer = self.input_shape[0]
+ for width in model_width:
+ output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
+ output_node_id = graph.add_layer(StubDropout(Constant.MLP_DROPOUT_RATE), output_node_id)
+ output_node_id = graph.add_layer(StubReLU(), output_node_id)
+ n_nodes_prev_layer = width
+
+ graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
+ return graph
| {"golden_diff": "diff --git a/autokeras/constant.py b/autokeras/constant.py\n--- a/autokeras/constant.py\n+++ b/autokeras/constant.py\n@@ -18,9 +18,12 @@\n \n DENSE_DROPOUT_RATE = 0.5\n CONV_DROPOUT_RATE = 0.25\n+ MLP_DROPOUT_RATE = 0.25\n CONV_BLOCK_DISTANCE = 2\n DENSE_BLOCK_DISTANCE = 1\n MODEL_LEN = 3\n+ MLP_MODEL_LEN = 3\n+ MLP_MODEL_WIDTH = 5\n MODEL_WIDTH = 64\n \n # ModelTrainer\ndiff --git a/autokeras/nn/generator.py b/autokeras/nn/generator.py\n--- a/autokeras/nn/generator.py\n+++ b/autokeras/nn/generator.py\n@@ -2,12 +2,22 @@\n from autokeras.nn.graph import Graph\n from autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \\\n StubReLU, StubGlobalPooling\n+from abc import abstractmethod\n \n \n-class CnnGenerator:\n+class NetworkGenerator:\n def __init__(self, n_output_node, input_shape):\n self.n_output_node = n_output_node\n self.input_shape = input_shape\n+\n+ @abstractmethod\n+ def generate(self, model_len, model_width):\n+ pass\n+\n+\n+class CnnGenerator(NetworkGenerator):\n+ def __init__(self, n_output_node, input_shape):\n+ super(CnnGenerator, self).__init__(n_output_node, input_shape)\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n@@ -33,3 +43,28 @@\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n return graph\n+\n+\n+class MlpGenerator(NetworkGenerator):\n+ def __init__(self, n_output_node, input_shape):\n+ super(MlpGenerator, self).__init__(n_output_node, input_shape)\n+ if len(self.input_shape) > 1:\n+ raise ValueError('The input dimension is too high.')\n+\n+ def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):\n+ if type(model_width) is list and not len(model_width) == model_len:\n+ raise ValueError('The length of \\'model_width\\' does not match \\'model_len\\'')\n+ elif type(model_width) is int:\n+ model_width = [model_width] * model_len\n+\n+ graph = Graph(self.input_shape[0], False)\n+ output_node_id = 0\n+ n_nodes_prev_layer = self.input_shape[0]\n+ for width in model_width:\n+ output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)\n+ output_node_id = graph.add_layer(StubDropout(Constant.MLP_DROPOUT_RATE), output_node_id)\n+ output_node_id = graph.add_layer(StubReLU(), output_node_id)\n+ n_nodes_prev_layer = width\n+\n+ graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)\n+ return graph\n", "issue": "Add a class to generate an MLP\n<!---\r\nPlease label your issue with `new_task_module`.\r\n-->\r\n\r\n### Suggested Name\r\n<!---\r\n-->\r\nMlpGenerator\r\n\r\n### Task Description\r\n<!---\r\nA clear and concise description of the machine learning task to be added, its problem statement and learning outcome.\r\n-->\r\nAdd a class named MlpGenerator. 
Create a superclass that would be inherited by CnnGenerator and MlpGenerator.\r\n\n", "before_files": [{"content": "class Constant:\n # Data\n\n VALIDATION_SET_SIZE = 0.08333\n\n # Searcher\n\n MAX_MODEL_NUM = 1000\n BETA = 2.576\n KERNEL_LAMBDA = 0.1\n T_MIN = 0.0001\n N_NEIGHBOURS = 8\n MAX_MODEL_SIZE = (1 << 25)\n MAX_LAYER_WIDTH = 4096\n MAX_LAYERS = 100\n\n # Model Defaults\n\n DENSE_DROPOUT_RATE = 0.5\n CONV_DROPOUT_RATE = 0.25\n CONV_BLOCK_DISTANCE = 2\n DENSE_BLOCK_DISTANCE = 1\n MODEL_LEN = 3\n MODEL_WIDTH = 64\n\n # ModelTrainer\n\n DATA_AUGMENTATION = True\n MAX_ITER_NUM = 200\n MIN_LOSS_DEC = 1e-4\n MAX_NO_IMPROVEMENT_NUM = 5\n MAX_BATCH_SIZE = 128\n LIMIT_MEMORY = False\n SEARCH_MAX_ITER = 200\n\n # text preprocessor\n\n EMBEDDING_DIM = 100\n MAX_SEQUENCE_LENGTH = 400\n MAX_NB_WORDS = 5000\n EXTRACT_PATH = \"glove/\"\n # Download file name\n FILE_PATH = \"glove.zip\"\n PRE_TRAIN_FILE_LINK = \"http://nlp.stanford.edu/data/glove.6B.zip\"\n PRE_TRAIN_FILE_NAME = \"glove.6B.100d.txt\"\n", "path": "autokeras/constant.py"}, {"content": "from autokeras.constant import Constant\nfrom autokeras.nn.graph import Graph\nfrom autokeras.nn.layers import StubBatchNormalization, StubConv, StubDropout, StubPooling, StubDense, StubFlatten, \\\n StubReLU, StubGlobalPooling\n\n\nclass CnnGenerator:\n def __init__(self, n_output_node, input_shape):\n self.n_output_node = n_output_node\n self.input_shape = input_shape\n if len(self.input_shape) > 4:\n raise ValueError('The input dimension is too high.')\n if len(self.input_shape) < 2:\n raise ValueError('The input dimension is too low.')\n\n def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):\n pooling_len = int(model_len / 4)\n graph = Graph(self.input_shape, False)\n temp_input_channel = self.input_shape[-1]\n output_node_id = 0\n for i in range(model_len):\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n output_node_id = graph.add_layer(StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)\n output_node_id = graph.add_layer(StubBatchNormalization(model_width), output_node_id)\n temp_input_channel = model_width\n if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):\n output_node_id = graph.add_layer(StubPooling(), output_node_id)\n\n output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)\n output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)\n output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),\n output_node_id)\n output_node_id = graph.add_layer(StubReLU(), output_node_id)\n graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n return graph\n", "path": "autokeras/nn/generator.py"}]} | 1,604 | 767 |
gh_patches_debug_9892 | rasdani/github-patches | git_diff | docker__docker-py-2795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
version requirements for cryptography should be consistent
Hi
It seems that version requirements for cryptography in setup.py and requirements.txt are not consistent
In setup.py, it is cryptography>=1.3.4
In requirements.txt, it is cryptography==3.2
Note that in pypi, the version of cryptography is always updating( now 3.4.6). Inconsistent version requirements will result in installing different version for cryptography if I use different ways of installation.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import codecs
5 import os
6
7 from setuptools import find_packages
8 from setuptools import setup
9
10 ROOT_DIR = os.path.dirname(__file__)
11 SOURCE_DIR = os.path.join(ROOT_DIR)
12
13 requirements = [
14 'websocket-client >= 0.32.0',
15 'requests >= 2.14.2, != 2.18.0',
16 ]
17
18 extras_require = {
19 # win32 APIs if on Windows (required for npipe support)
20 ':sys_platform == "win32"': 'pywin32==227',
21
22 # If using docker-py over TLS, highly recommend this option is
23 # pip-installed or pinned.
24
25 # TODO: if pip installing both "requests" and "requests[security]", the
26 # extra package from the "security" option are not installed (see
27 # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
28 # installing the extra dependencies, install the following instead:
29 # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
30 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
31
32 # Only required when connecting using the ssh:// protocol
33 'ssh': ['paramiko>=2.4.2'],
34
35 }
36
37 version = None
38 exec(open('docker/version.py').read())
39
40 with open('./test-requirements.txt') as test_reqs_txt:
41 test_requirements = [line for line in test_reqs_txt]
42
43
44 long_description = ''
45 with codecs.open('./README.md', encoding='utf-8') as readme_md:
46 long_description = readme_md.read()
47
48 setup(
49 name="docker",
50 version=version,
51 description="A Python library for the Docker Engine API.",
52 long_description=long_description,
53 long_description_content_type='text/markdown',
54 url='https://github.com/docker/docker-py',
55 project_urls={
56 'Documentation': 'https://docker-py.readthedocs.io',
57 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501
58 'Source': 'https://github.com/docker/docker-py',
59 'Tracker': 'https://github.com/docker/docker-py/issues',
60 },
61 packages=find_packages(exclude=["tests.*", "tests"]),
62 install_requires=requirements,
63 tests_require=test_requirements,
64 extras_require=extras_require,
65 python_requires='>=3.6',
66 zip_safe=False,
67 test_suite='tests',
68 classifiers=[
69 'Development Status :: 5 - Production/Stable',
70 'Environment :: Other Environment',
71 'Intended Audience :: Developers',
72 'Operating System :: OS Independent',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 3',
75 'Programming Language :: Python :: 3.6',
76 'Programming Language :: Python :: 3.7',
77 'Programming Language :: Python :: 3.8',
78 'Programming Language :: Python :: 3.9',
79 'Topic :: Software Development',
80 'Topic :: Utilities',
81 'License :: OSI Approved :: Apache Software License',
82 ],
83 maintainer='Joffrey F',
84 maintainer_email='[email protected]',
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],
+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],
# Only required when connecting using the ssh:// protocol
'ssh': ['paramiko>=2.4.2'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n- 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],\n+ 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n \n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.2'],\n", "issue": "version requirements for cryptography should be consistent\nHi\r\nIt seems that version requirements for cryptography in setup.py and requirements.txt are not consistent\r\nIn setup.py, it is cryptography>=1.3.4\r\nIn requirements.txt, it is cryptography==3.2\r\nNote that in pypi, the version of cryptography is always updating( now 3.4.6). Inconsistent version requirements will result in installing different version for cryptography if I use different ways of installation. \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32==227',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). 
Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.2'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]} | 1,534 | 206 |
gh_patches_debug_56080 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3107 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: typing miss of kwargs
### 🐛 Describe the bug
When I tried to train prompts with opt model, the following error was occurred.
```
Traceback (most recent call last):
File "/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py", line 127, in <module>
main(args)
File "/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py", line 42, in main
critic = OPTCritic(lora_rank=args.lora_rank).cuda()
File "/home/xxx/.pyenv/versions/3.9.9/lib/python3.9/site-packages/chatgpt/models/opt/opt_critic.py", line 38, in __init__
super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
NameError: name 'kwargs' is not defined
```
To Reproduce
```
torchrun --standalone --nproc_per_node=2 train_prompts.py prompts.csv --model opt --strategy colossalai_zero2
```
### Environment
_No response_
</issue>
<code>
[start of applications/ChatGPT/chatgpt/models/opt/opt_critic.py]
1 from typing import Optional
2
3 import torch.nn as nn
4 from transformers.models.opt.configuration_opt import OPTConfig
5 from transformers.models.opt.modeling_opt import OPTModel
6
7 from ..base import Critic
8
9
10 class OPTCritic(Critic):
11 """
12 OPT Critic model.
13
14 Args:
15 pretrained (str): Pretrained model name or path.
16 config (OPTConfig): Model config.
17 checkpoint (bool): Enable gradient checkpointing.
18 lora_rank (int): Rank of the low-rank approximation.
19 lora_train_bias (str): LoRA bias training mode.
20 """
21
22 def __init__(self,
23 pretrained: Optional[str] = None,
24 config: Optional[OPTConfig] = None,
25 checkpoint: bool = False,
26 lora_rank: int = 0,
27 lora_train_bias: str = 'none',
28 **kargs) -> None:
29 if pretrained is not None:
30 model = OPTModel.from_pretrained(pretrained)
31 elif config is not None:
32 model = OPTModel(config)
33 else:
34 model = OPTModel(OPTConfig())
35 if checkpoint:
36 model.gradient_checkpointing_enable()
37 value_head = nn.Linear(model.config.hidden_size, 1)
38 super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
39
[end of applications/ChatGPT/chatgpt/models/opt/opt_critic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
@@ -25,7 +25,7 @@
checkpoint: bool = False,
lora_rank: int = 0,
lora_train_bias: str = 'none',
- **kargs) -> None:
+ **kwargs) -> None:
if pretrained is not None:
model = OPTModel.from_pretrained(pretrained)
elif config is not None:
| {"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n@@ -25,7 +25,7 @@\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n- **kargs) -> None:\n+ **kwargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n", "issue": "[BUG]: typing miss of kwargs\n### \ud83d\udc1b Describe the bug\n\nWhen I tried to train prompts with opt model, the following error was occurred.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py\", line 127, in <module>\r\n main(args)\r\n File \"/home/xxx/workspace/ColossalAI/applications/ChatGPT/examples/train_prompts.py\", line 42, in main\r\n critic = OPTCritic(lora_rank=args.lora_rank).cuda()\r\n File \"/home/xxx/.pyenv/versions/3.9.9/lib/python3.9/site-packages/chatgpt/models/opt/opt_critic.py\", line 38, in __init__\r\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\r\nNameError: name 'kwargs' is not defined\r\n```\r\n\r\nTo Reproduce\r\n\r\n```\r\ntorchrun --standalone --nproc_per_node=2 train_prompts.py prompts.csv --model opt --strategy colossalai_zero2\r\n```\r\n\r\n\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.opt.configuration_opt import OPTConfig\nfrom transformers.models.opt.modeling_opt import OPTModel\n\nfrom ..base import Critic\n\n\nclass OPTCritic(Critic):\n \"\"\"\n OPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (OPTConfig): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n lora_rank (int): Rank of the low-rank approximation.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[OPTConfig] = None,\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n **kargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n model = OPTModel(config)\n else:\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.hidden_size, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/opt/opt_critic.py"}]} | 1,150 | 155 |
gh_patches_debug_9664 | rasdani/github-patches | git_diff | conda__conda-build-2271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set target in package section [feature request]
## Problem
Currently the only way to change the target platform is using the built in way of handling variants. This involves including an additional file `conda_build_config.yaml`. When you try to set `target` in the package section it is completely ignored and when you try to set `target` in the outputs section it throws an error. Something like:
```
Expecting win-64 got linux-64
```
## Request
Ideally we would like to be able to set target directly in the package section, but our needs could be met as long as that functionality is exposed and not bound to the use of `conda_build_config.yaml`. I took a look at doing this myself but I am unfamiliar with the code base and the reliance on `target_platform` seems to be entrenched in the variants logic and spread across quite a few files.
Please let me know what you think!
</issue>
<code>
[start of conda_build/tarcheck.py]
1 from __future__ import absolute_import, division, print_function
2
3 import json
4 from os.path import basename
5 import re
6 import tarfile
7
8 from conda_build.utils import codec
9
10
11 def dist_fn(fn):
12 if fn.endswith('.tar'):
13 return fn[:-4]
14 elif fn.endswith('.tar.bz2'):
15 return fn[:-8]
16 else:
17 raise Exception('did not expect filename: %r' % fn)
18
19
20 class TarCheck(object):
21 def __init__(self, path, config):
22 self.t = tarfile.open(path)
23 self.paths = set(m.path for m in self.t.getmembers())
24 self.dist = dist_fn(basename(path))
25 self.name, self.version, self.build = self.dist.split('::', 1)[-1].rsplit('-', 2)
26 self.config = config
27
28 def __enter__(self):
29 return self
30
31 def __exit__(self, e_type, e_value, traceback):
32 self.t.close()
33
34 def info_files(self):
35 if re.search('pyh[0-9a-f]{%d}_' % self.config.hash_length, self.build):
36 return
37 lista = [p.strip().decode('utf-8') for p in
38 self.t.extractfile('info/files').readlines()]
39 seta = set(lista)
40 if len(lista) != len(seta):
41 raise Exception('info/files: duplicates')
42
43 listb = [m.path for m in self.t.getmembers()
44 if not (m.path.startswith('info/') or m.isdir())]
45 setb = set(listb)
46 if len(listb) != len(setb):
47 raise Exception('info_files: duplicate members')
48
49 if seta == setb:
50 return
51 for p in sorted(seta | setb):
52 if p not in seta:
53 print('%r not in info/files' % p)
54 if p not in setb:
55 print('%r not in tarball' % p)
56 raise Exception('info/files')
57
58 def index_json(self):
59 info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
60 for varname in 'name', 'version':
61 if info[varname] != getattr(self, varname):
62 raise Exception('%s: %r != %r' % (varname, info[varname],
63 getattr(self, varname)))
64 assert isinstance(info['build_number'], int)
65
66 def prefix_length(self):
67 prefix_length = None
68 if 'info/has_prefix' in self.t.getnames():
69 prefix_files = self.t.extractfile('info/has_prefix').readlines()
70 for line in prefix_files:
71 try:
72 prefix, file_type, _ = line.split()
73 # lines not conforming to the split
74 except ValueError:
75 continue
76 if hasattr(file_type, 'decode'):
77 file_type = file_type.decode(codec)
78 if file_type == 'binary':
79 prefix_length = len(prefix)
80 break
81 return prefix_length
82
83 def correct_subdir(self):
84 info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
85 assert info['subdir'] in [self.config.host_subdir, 'noarch'], \
86 ("Inconsistent subdir in package - index.json expecting {0},"
87 " got {1}".format(self.config.host_subdir, info['subdir']))
88
89
90 def check_all(path, config):
91 x = TarCheck(path, config)
92 x.info_files()
93 x.index_json()
94 x.correct_subdir()
95 x.t.close()
96
97
98 def check_prefix_lengths(files, config):
99 lengths = {}
100 for f in files:
101 length = TarCheck(f, config).prefix_length()
102 if length and length < config.prefix_length:
103 lengths[f] = length
104 return lengths
105
[end of conda_build/tarcheck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/tarcheck.py b/conda_build/tarcheck.py
--- a/conda_build/tarcheck.py
+++ b/conda_build/tarcheck.py
@@ -82,7 +82,7 @@
def correct_subdir(self):
info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
- assert info['subdir'] in [self.config.host_subdir, 'noarch'], \
+ assert info['subdir'] in [self.config.host_subdir, 'noarch', self.config.target_subdir], \
("Inconsistent subdir in package - index.json expecting {0},"
" got {1}".format(self.config.host_subdir, info['subdir']))
| {"golden_diff": "diff --git a/conda_build/tarcheck.py b/conda_build/tarcheck.py\n--- a/conda_build/tarcheck.py\n+++ b/conda_build/tarcheck.py\n@@ -82,7 +82,7 @@\n \n def correct_subdir(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n- assert info['subdir'] in [self.config.host_subdir, 'noarch'], \\\n+ assert info['subdir'] in [self.config.host_subdir, 'noarch', self.config.target_subdir], \\\n (\"Inconsistent subdir in package - index.json expecting {0},\"\n \" got {1}\".format(self.config.host_subdir, info['subdir']))\n", "issue": "Set target in package section [feature request]\n## Problem\r\n\r\nCurrently the only way to change the target platform is using the built in way of handling variants. This involves including an additional file `conda_build_config.yaml`. When you try to set `target` in the package section it is completely ignored and when you try to set `target` in the outputs section it throws an error. Something like:\r\n```\r\nExpecting win-64 got linux-64\r\n```\r\n\r\n## Request\r\n\r\nIdeally we would like to be able to set target directly in the package section, but our needs could be met as long as that functionality is exposed and not bound to the use of `conda_build_config.yaml`. I took a look at doing this myself but I am unfamiliar with the code base and the reliance on `target_platform` seems to be entrenched in the variants logic and spread across quite a few files.\r\n\r\nPlease let me know what you think!\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport json\nfrom os.path import basename\nimport re\nimport tarfile\n\nfrom conda_build.utils import codec\n\n\ndef dist_fn(fn):\n if fn.endswith('.tar'):\n return fn[:-4]\n elif fn.endswith('.tar.bz2'):\n return fn[:-8]\n else:\n raise Exception('did not expect filename: %r' % fn)\n\n\nclass TarCheck(object):\n def __init__(self, path, config):\n self.t = tarfile.open(path)\n self.paths = set(m.path for m in self.t.getmembers())\n self.dist = dist_fn(basename(path))\n self.name, self.version, self.build = self.dist.split('::', 1)[-1].rsplit('-', 2)\n self.config = config\n\n def __enter__(self):\n return self\n\n def __exit__(self, e_type, e_value, traceback):\n self.t.close()\n\n def info_files(self):\n if re.search('pyh[0-9a-f]{%d}_' % self.config.hash_length, self.build):\n return\n lista = [p.strip().decode('utf-8') for p in\n self.t.extractfile('info/files').readlines()]\n seta = set(lista)\n if len(lista) != len(seta):\n raise Exception('info/files: duplicates')\n\n listb = [m.path for m in self.t.getmembers()\n if not (m.path.startswith('info/') or m.isdir())]\n setb = set(listb)\n if len(listb) != len(setb):\n raise Exception('info_files: duplicate members')\n\n if seta == setb:\n return\n for p in sorted(seta | setb):\n if p not in seta:\n print('%r not in info/files' % p)\n if p not in setb:\n print('%r not in tarball' % p)\n raise Exception('info/files')\n\n def index_json(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n for varname in 'name', 'version':\n if info[varname] != getattr(self, varname):\n raise Exception('%s: %r != %r' % (varname, info[varname],\n getattr(self, varname)))\n assert isinstance(info['build_number'], int)\n\n def prefix_length(self):\n prefix_length = None\n if 'info/has_prefix' in self.t.getnames():\n prefix_files = self.t.extractfile('info/has_prefix').readlines()\n for line in prefix_files:\n try:\n prefix, file_type, _ = 
line.split()\n # lines not conforming to the split\n except ValueError:\n continue\n if hasattr(file_type, 'decode'):\n file_type = file_type.decode(codec)\n if file_type == 'binary':\n prefix_length = len(prefix)\n break\n return prefix_length\n\n def correct_subdir(self):\n info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))\n assert info['subdir'] in [self.config.host_subdir, 'noarch'], \\\n (\"Inconsistent subdir in package - index.json expecting {0},\"\n \" got {1}\".format(self.config.host_subdir, info['subdir']))\n\n\ndef check_all(path, config):\n x = TarCheck(path, config)\n x.info_files()\n x.index_json()\n x.correct_subdir()\n x.t.close()\n\n\ndef check_prefix_lengths(files, config):\n lengths = {}\n for f in files:\n length = TarCheck(f, config).prefix_length()\n if length and length < config.prefix_length:\n lengths[f] = length\n return lengths\n", "path": "conda_build/tarcheck.py"}]} | 1,761 | 168 |
gh_patches_debug_21519 | rasdani/github-patches | git_diff | google__flax-596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
QoL: better print for FrozenDict
The best way I'm aware of to get an overview of model shape is via `jax.tree_map(jnp.shape, params)`. FrozenDicts have no concept of pretty printing the way dicts do, so large models are unwieldy to parse at a glance.
</issue>
<code>
[start of flax/core/frozen_dict.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Frozen Dictionary."""
16
17 from typing import Any, TypeVar, Mapping, Dict, Tuple
18
19 from flax import serialization
20 import jax
21
22
23 K = TypeVar('K')
24 V = TypeVar('V')
25
26
27 @jax.tree_util.register_pytree_node_class
28 class FrozenDict(Mapping[K, V]):
29 """An immutable variant of the Python dict."""
30 __slots__ = ('_dict', '_hash')
31
32 def __init__(self, *args, **kwargs):
33 # make sure the dict is as
34 xs = dict(*args, **kwargs)
35 self._dict = _prepare_freeze(xs)
36
37 self._hash = None
38
39 def __getitem__(self, key):
40 v = self._dict[key]
41 if isinstance(v, dict):
42 return FrozenDict(v)
43 return v
44
45 def __setitem__(self, key, value):
46 raise ValueError('FrozenDict is immutable.')
47
48 def __contains__(self, key):
49 return key in self._dict
50
51 def __iter__(self):
52 return iter(self._dict)
53
54 def __len__(self):
55 return len(self._dict)
56
57 def __repr__(self):
58 return 'FrozenDict(%r)' % self._dict
59
60 def __hash__(self):
61 if self._hash is None:
62 h = 0
63 for key, value in self.items():
64 h ^= hash((key, value))
65 self._hash = h
66 return self._hash
67
68 def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':
69 """Create a new FrozenDict with additional or replaced entries."""
70 return type(self)(self, **unfreeze(add_or_replace))
71
72 def items(self):
73 for key in self._dict:
74 yield (key, self[key])
75
76 def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
77 """Create a new FrozenDict where one entry is removed.
78
79 Example::
80
81 state, params = variables.pop('params')
82
83 Args:
84 key: the key to remove from the dict
85 Returns:
86 A pair with the new FrozenDict and the removed value.
87 """
88 value = self[key]
89 new_dict = dict(self._dict)
90 new_dict.pop(key)
91 new_self = type(self)(new_dict)
92 return new_self, value
93
94 def unfreeze(self) -> Dict[K, V]:
95 return unfreeze(self)
96
97 def tree_flatten(self):
98 return (self._dict,), ()
99
100 @classmethod
101 def tree_unflatten(cls, _, data):
102 return cls(*data)
103
104
105 def _prepare_freeze(xs: Any) -> Any:
106 """Deep copy unfrozen dicts to make the dictionary FrozenDict safe."""
107 if isinstance(xs, FrozenDict):
108 # we can safely ref share the internal state of a FrozenDict
109 # because it is immutable.
110 return xs._dict # pylint: disable=protected-access
111 if not isinstance(xs, dict):
112 # return a leaf as is.
113 return xs
114 # recursively copy dictionary to avoid ref sharing
115 return {key: _prepare_freeze(val) for key, val in xs.items()}
116
117
118 def freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:
119 """Freeze a nested dict.
120
121 Makes a nested `dict` immutable by transforming it into `FrozenDict`.
122 """
123 return FrozenDict(xs)
124
125
126 def unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:
127 """Unfreeze a FrozenDict.
128
129 Makes a mutable copy of a `FrozenDict` mutable by transforming
130 it into (nested) dict.
131 """
132 if not isinstance(x, (FrozenDict, dict)):
133 return x
134 ys = {}
135 for key, value in x.items():
136 ys[key] = unfreeze(value)
137 return ys
138
139
140 def _frozen_dict_state_dict(xs):
141 return {key: serialization.to_state_dict(value) for key, value in xs.items()}
142
143
144 def _restore_frozen_dict(xs, states):
145 return FrozenDict(
146 {key: serialization.from_state_dict(value, states[key])
147 for key, value in xs.items()})
148
149
150 serialization.register_serialization_state(
151 FrozenDict,
152 _frozen_dict_state_dict,
153 _restore_frozen_dict)
154
[end of flax/core/frozen_dict.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/core/frozen_dict.py b/flax/core/frozen_dict.py
--- a/flax/core/frozen_dict.py
+++ b/flax/core/frozen_dict.py
@@ -24,6 +24,14 @@
V = TypeVar('V')
+def _indent(x, num_spaces):
+ indent_str = ' ' * num_spaces
+ lines = x.split('\n')
+ assert lines[-1] == ''
+ # skip the final line because it's empty and should not be indented.
+ return '\n'.join(indent_str + line for line in lines[:-1]) + '\n'
+
+
@jax.tree_util.register_pytree_node_class
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
@@ -55,7 +63,21 @@
return len(self._dict)
def __repr__(self):
- return 'FrozenDict(%r)' % self._dict
+ return self.pretty_repr()
+
+ def pretty_repr(self, num_spaces=4):
+ """Returns an indented representation of the nested dictionary."""
+ def pretty_dict(x):
+ if not isinstance(x, dict):
+ return repr(x)
+ rep = ''
+ for key, val in x.items():
+ rep += f'{key}: {pretty_dict(val)},\n'
+ if rep:
+ return '{\n' + _indent(rep, num_spaces) + '}'
+ else:
+ return '{}'
+ return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
| {"golden_diff": "diff --git a/flax/core/frozen_dict.py b/flax/core/frozen_dict.py\n--- a/flax/core/frozen_dict.py\n+++ b/flax/core/frozen_dict.py\n@@ -24,6 +24,14 @@\n V = TypeVar('V')\n \n \n+def _indent(x, num_spaces):\n+ indent_str = ' ' * num_spaces\n+ lines = x.split('\\n')\n+ assert lines[-1] == ''\n+ # skip the final line because it's empty and should not be indented.\n+ return '\\n'.join(indent_str + line for line in lines[:-1]) + '\\n'\n+\n+\n @jax.tree_util.register_pytree_node_class\n class FrozenDict(Mapping[K, V]):\n \"\"\"An immutable variant of the Python dict.\"\"\"\n@@ -55,7 +63,21 @@\n return len(self._dict)\n \n def __repr__(self):\n- return 'FrozenDict(%r)' % self._dict\n+ return self.pretty_repr()\n+\n+ def pretty_repr(self, num_spaces=4):\n+ \"\"\"Returns an indented representation of the nested dictionary.\"\"\"\n+ def pretty_dict(x):\n+ if not isinstance(x, dict):\n+ return repr(x)\n+ rep = ''\n+ for key, val in x.items():\n+ rep += f'{key}: {pretty_dict(val)},\\n'\n+ if rep:\n+ return '{\\n' + _indent(rep, num_spaces) + '}'\n+ else:\n+ return '{}'\n+ return f'FrozenDict({pretty_dict(self._dict)})'\n \n def __hash__(self):\n if self._hash is None:\n", "issue": "QoL: better print for FrozenDict\nThe best way I'm aware of to get an overview of model shape is via `jax.tree_map(jnp.shape, params)`. FrozenDicts have no concept of pretty printing the way dicts do, so large models are unwieldy to parse at a glance. \n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Frozen Dictionary.\"\"\"\n\nfrom typing import Any, TypeVar, Mapping, Dict, Tuple\n\nfrom flax import serialization\nimport jax\n\n\nK = TypeVar('K')\nV = TypeVar('V')\n\n\[email protected]_util.register_pytree_node_class\nclass FrozenDict(Mapping[K, V]):\n \"\"\"An immutable variant of the Python dict.\"\"\"\n __slots__ = ('_dict', '_hash')\n\n def __init__(self, *args, **kwargs):\n # make sure the dict is as\n xs = dict(*args, **kwargs)\n self._dict = _prepare_freeze(xs)\n\n self._hash = None\n\n def __getitem__(self, key):\n v = self._dict[key]\n if isinstance(v, dict):\n return FrozenDict(v)\n return v\n\n def __setitem__(self, key, value):\n raise ValueError('FrozenDict is immutable.')\n\n def __contains__(self, key):\n return key in self._dict\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)\n\n def __repr__(self):\n return 'FrozenDict(%r)' % self._dict\n\n def __hash__(self):\n if self._hash is None:\n h = 0\n for key, value in self.items():\n h ^= hash((key, value))\n self._hash = h\n return self._hash\n\n def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':\n \"\"\"Create a new FrozenDict with additional or replaced entries.\"\"\"\n return type(self)(self, **unfreeze(add_or_replace))\n\n def items(self):\n for key in self._dict:\n yield (key, self[key])\n\n def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n \"\"\"Create a new FrozenDict where one entry is 
removed.\n\n Example::\n\n state, params = variables.pop('params')\n\n Args:\n key: the key to remove from the dict\n Returns:\n A pair with the new FrozenDict and the removed value.\n \"\"\"\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value\n\n def unfreeze(self) -> Dict[K, V]:\n return unfreeze(self)\n\n def tree_flatten(self):\n return (self._dict,), ()\n\n @classmethod\n def tree_unflatten(cls, _, data):\n return cls(*data)\n\n\ndef _prepare_freeze(xs: Any) -> Any:\n \"\"\"Deep copy unfrozen dicts to make the dictionary FrozenDict safe.\"\"\"\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}\n\n\ndef freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:\n \"\"\"Freeze a nested dict.\n\n Makes a nested `dict` immutable by transforming it into `FrozenDict`.\n \"\"\"\n return FrozenDict(xs)\n\n\ndef unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:\n \"\"\"Unfreeze a FrozenDict.\n\n Makes a mutable copy of a `FrozenDict` mutable by transforming\n it into (nested) dict.\n \"\"\"\n if not isinstance(x, (FrozenDict, dict)):\n return x\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n\n\ndef _frozen_dict_state_dict(xs):\n return {key: serialization.to_state_dict(value) for key, value in xs.items()}\n\n\ndef _restore_frozen_dict(xs, states):\n return FrozenDict(\n {key: serialization.from_state_dict(value, states[key])\n for key, value in xs.items()})\n\n\nserialization.register_serialization_state(\n FrozenDict,\n _frozen_dict_state_dict,\n _restore_frozen_dict)\n", "path": "flax/core/frozen_dict.py"}]} | 2,025 | 362 |
gh_patches_debug_11172 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enhance parser "satellite_installer_configurations"
As a default, the file "/etc/foreman-installer/custom-hiera.yaml" is empty, it means customers haven't done any tuning, it shouldn't be skipped.
</issue>
<code>
[start of insights/parsers/satellite_installer_configurations.py]
1 """
2 Satellite installer configuration files
3 =======================================
4
5 Parsers included in this module are:
6
7 CustomHiera - file ``/etc/foreman-installer/custom-hiera.yaml``
8 ---------------------------------------------------------------
9 Parsers the file `/etc/foreman-installer/custom-hiera.yaml`
10
11 """
12
13 from insights import parser, YAMLParser
14 from insights.specs import Specs
15
16
17 @parser(Specs.satellite_custom_hiera)
18 class CustomHiera(YAMLParser):
19 """
20 Class to parse ``/etc/foreman-installer/custom-hiera.yaml``
21
22 Examples:
23 >>> 'apache::mod::prefork::serverlimit' in custom_hiera
24 True
25 >>> custom_hiera['apache::mod::prefork::serverlimit']
26 582
27 """
28
29 pass
30
[end of insights/parsers/satellite_installer_configurations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/parsers/satellite_installer_configurations.py b/insights/parsers/satellite_installer_configurations.py
--- a/insights/parsers/satellite_installer_configurations.py
+++ b/insights/parsers/satellite_installer_configurations.py
@@ -12,6 +12,7 @@
from insights import parser, YAMLParser
from insights.specs import Specs
+from insights.parsers import SkipException
@parser(Specs.satellite_custom_hiera)
@@ -25,5 +26,8 @@
>>> custom_hiera['apache::mod::prefork::serverlimit']
582
"""
-
- pass
+ def parse_content(self, content):
+ try:
+ super(CustomHiera, self).parse_content(content)
+ except SkipException:
+ pass
| {"golden_diff": "diff --git a/insights/parsers/satellite_installer_configurations.py b/insights/parsers/satellite_installer_configurations.py\n--- a/insights/parsers/satellite_installer_configurations.py\n+++ b/insights/parsers/satellite_installer_configurations.py\n@@ -12,6 +12,7 @@\n \n from insights import parser, YAMLParser\n from insights.specs import Specs\n+from insights.parsers import SkipException\n \n \n @parser(Specs.satellite_custom_hiera)\n@@ -25,5 +26,8 @@\n >>> custom_hiera['apache::mod::prefork::serverlimit']\n 582\n \"\"\"\n-\n- pass\n+ def parse_content(self, content):\n+ try:\n+ super(CustomHiera, self).parse_content(content)\n+ except SkipException:\n+ pass\n", "issue": "Enhance parser \"satellite_installer_configurations\"\nAs a default, the file \"/etc/foreman-installer/custom-hiera.yaml\" is empty, it means customers haven't done any tuning, it shouldn't be skipped.\n", "before_files": [{"content": "\"\"\"\nSatellite installer configuration files\n=======================================\n\nParsers included in this module are:\n\nCustomHiera - file ``/etc/foreman-installer/custom-hiera.yaml``\n---------------------------------------------------------------\nParsers the file `/etc/foreman-installer/custom-hiera.yaml`\n\n\"\"\"\n\nfrom insights import parser, YAMLParser\nfrom insights.specs import Specs\n\n\n@parser(Specs.satellite_custom_hiera)\nclass CustomHiera(YAMLParser):\n \"\"\"\n Class to parse ``/etc/foreman-installer/custom-hiera.yaml``\n\n Examples:\n >>> 'apache::mod::prefork::serverlimit' in custom_hiera\n True\n >>> custom_hiera['apache::mod::prefork::serverlimit']\n 582\n \"\"\"\n\n pass\n", "path": "insights/parsers/satellite_installer_configurations.py"}]} | 811 | 191 |
gh_patches_debug_14651 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-763 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding pytest in an MPI environment
There are some functions that run in a distributed environment that have not been fully verified, so some pytests will be run in MPI to verify them.
https://github.com/pfnet/pytorch-pfn-extras/blob/266e8bde2c2a1a6aa3f8648d49e758975c8b436a/tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_distributed_snapshot.py#L40-L46
</issue>
<code>
[start of setup.py]
1 import os
2
3 import setuptools
4
5 here = os.path.abspath(os.path.dirname(__file__))
6 # Get __version__ variable
7 exec(open(os.path.join(here, "pytorch_pfn_extras", "_version.py")).read())
8
9
10 setuptools.setup(
11 name="pytorch-pfn-extras",
12 version=__version__, # NOQA
13 description="Supplementary components to accelerate research and "
14 "development in PyTorch.",
15 author="Preferred Networks, Inc.",
16 license="MIT License",
17 install_requires=["numpy", "packaging", "torch", "typing-extensions>=3.10"],
18 extras_require={
19 "test": [
20 "pytest",
21 "onnxruntime",
22 "torchvision",
23 "torchaudio",
24 "pysen",
25 "black==23.3.0",
26 "flake8==4.0.1",
27 "isort==5.10.1",
28 "mypy==1.3.0",
29 "types-PyYAML",
30 "types-setuptools",
31 "matplotlib",
32 "tensorboard",
33 "ipython",
34 "ipywidgets",
35 "pandas",
36 "optuna",
37 "onnx",
38 "pytorch-ignite",
39 ],
40 "onnx": ["onnx"],
41 },
42 python_requires=">=3.6.0",
43 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
44 package_data={"pytorch_pfn_extras": ["py.typed"]},
45 )
46
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,27 +16,6 @@
license="MIT License",
install_requires=["numpy", "packaging", "torch", "typing-extensions>=3.10"],
extras_require={
- "test": [
- "pytest",
- "onnxruntime",
- "torchvision",
- "torchaudio",
- "pysen",
- "black==23.3.0",
- "flake8==4.0.1",
- "isort==5.10.1",
- "mypy==1.3.0",
- "types-PyYAML",
- "types-setuptools",
- "matplotlib",
- "tensorboard",
- "ipython",
- "ipywidgets",
- "pandas",
- "optuna",
- "onnx",
- "pytorch-ignite",
- ],
"onnx": ["onnx"],
},
python_requires=">=3.6.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,27 +16,6 @@\n license=\"MIT License\",\n install_requires=[\"numpy\", \"packaging\", \"torch\", \"typing-extensions>=3.10\"],\n extras_require={\n- \"test\": [\n- \"pytest\",\n- \"onnxruntime\",\n- \"torchvision\",\n- \"torchaudio\",\n- \"pysen\",\n- \"black==23.3.0\",\n- \"flake8==4.0.1\",\n- \"isort==5.10.1\",\n- \"mypy==1.3.0\",\n- \"types-PyYAML\",\n- \"types-setuptools\",\n- \"matplotlib\",\n- \"tensorboard\",\n- \"ipython\",\n- \"ipywidgets\",\n- \"pandas\",\n- \"optuna\",\n- \"onnx\",\n- \"pytorch-ignite\",\n- ],\n \"onnx\": [\"onnx\"],\n },\n python_requires=\">=3.6.0\",\n", "issue": "Adding pytest in an MPI environment\nThere are some functions that run in a distributed environment that have not been fully verified, so some pytests will be run in MPI to verify them.\r\n\r\nhttps://github.com/pfnet/pytorch-pfn-extras/blob/266e8bde2c2a1a6aa3f8648d49e758975c8b436a/tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_distributed_snapshot.py#L40-L46\r\n\n", "before_files": [{"content": "import os\n\nimport setuptools\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, \"pytorch_pfn_extras\", \"_version.py\")).read())\n\n\nsetuptools.setup(\n name=\"pytorch-pfn-extras\",\n version=__version__, # NOQA\n description=\"Supplementary components to accelerate research and \"\n \"development in PyTorch.\",\n author=\"Preferred Networks, Inc.\",\n license=\"MIT License\",\n install_requires=[\"numpy\", \"packaging\", \"torch\", \"typing-extensions>=3.10\"],\n extras_require={\n \"test\": [\n \"pytest\",\n \"onnxruntime\",\n \"torchvision\",\n \"torchaudio\",\n \"pysen\",\n \"black==23.3.0\",\n \"flake8==4.0.1\",\n \"isort==5.10.1\",\n \"mypy==1.3.0\",\n \"types-PyYAML\",\n \"types-setuptools\",\n \"matplotlib\",\n \"tensorboard\",\n \"ipython\",\n \"ipywidgets\",\n \"pandas\",\n \"optuna\",\n \"onnx\",\n \"pytorch-ignite\",\n ],\n \"onnx\": [\"onnx\"],\n },\n python_requires=\">=3.6.0\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"pytorch_pfn_extras\": [\"py.typed\"]},\n)\n", "path": "setup.py"}]} | 1,052 | 244 |
gh_patches_debug_21746 | rasdani/github-patches | git_diff | explosion__spaCy-3389 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feature request] Factory default for extension attributes
## Feature description
As usual with Python, mutable defaults are a big no-no for extension attributes, since they are shared by all instances, which leads to subtle and ~~quick to anger~~ hard to root out bugs (see e.g. #2581).
The documentation mentions that pitfall, but doesn't offer a convenient solution: if I want to keep a static list of interesting spans in my document in a `Doc._.interesting`, it is not clear where the getter and setter that I am supposed to use for this property should store the state. (From what I understand, it should probably be somewhere in `Doc.user_data`, but I have not found a lot of doc on that either)
I propose a `factory` argument to `set_extension` that would be called the first time that the value for the corresponding extension property is retrieved for a given instance (as `collections.defaultdict` does), so one could just write
```python
spacy.tokens.Doc.set_extension('interesting', factory=list)
```
</issue>
<code>
[start of spacy/tokens/underscore.py]
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 import functools
5
6 from ..errors import Errors
7
8
9 class Underscore(object):
10 doc_extensions = {}
11 span_extensions = {}
12 token_extensions = {}
13
14 def __init__(self, extensions, obj, start=None, end=None):
15 object.__setattr__(self, "_extensions", extensions)
16 object.__setattr__(self, "_obj", obj)
17 # Assumption is that for doc values, _start and _end will both be None
18 # Span will set non-None values for _start and _end
19 # Token will have _start be non-None, _end be None
20 # This lets us key everything into the doc.user_data dictionary,
21 # (see _get_key), and lets us use a single Underscore class.
22 object.__setattr__(self, "_doc", obj.doc)
23 object.__setattr__(self, "_start", start)
24 object.__setattr__(self, "_end", end)
25
26 def __getattr__(self, name):
27 if name not in self._extensions:
28 raise AttributeError(Errors.E046.format(name=name))
29 default, method, getter, setter = self._extensions[name]
30 if getter is not None:
31 return getter(self._obj)
32 elif method is not None:
33 return functools.partial(method, self._obj)
34 else:
35 return self._doc.user_data.get(self._get_key(name), default)
36
37 def __setattr__(self, name, value):
38 if name not in self._extensions:
39 raise AttributeError(Errors.E047.format(name=name))
40 default, method, getter, setter = self._extensions[name]
41 if setter is not None:
42 return setter(self._obj, value)
43 else:
44 self._doc.user_data[self._get_key(name)] = value
45
46 def set(self, name, value):
47 return self.__setattr__(name, value)
48
49 def get(self, name):
50 return self.__getattr__(name)
51
52 def has(self, name):
53 return name in self._extensions
54
55 def _get_key(self, name):
56 return ("._.", name, self._start, self._end)
57
58
59 def get_ext_args(**kwargs):
60 """Validate and convert arguments. Reused in Doc, Token and Span."""
61 default = kwargs.get("default")
62 getter = kwargs.get("getter")
63 setter = kwargs.get("setter")
64 method = kwargs.get("method")
65 if getter is None and setter is not None:
66 raise ValueError(Errors.E089)
67 valid_opts = ("default" in kwargs, method is not None, getter is not None)
68 nr_defined = sum(t is True for t in valid_opts)
69 if nr_defined != 1:
70 raise ValueError(Errors.E083.format(nr_defined=nr_defined))
71 if setter is not None and not hasattr(setter, "__call__"):
72 raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
73 if getter is not None and not hasattr(getter, "__call__"):
74 raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
75 if method is not None and not hasattr(method, "__call__"):
76 raise ValueError(Errors.E091.format(name="method", value=repr(method)))
77 return (default, method, getter, setter)
78
79
80 def is_writable_attr(ext):
81 """Check if an extension attribute is writable.
82 ext (tuple): The (default, getter, setter, method) tuple available via
83 {Doc,Span,Token}.get_extension.
84 RETURNS (bool): Whether the attribute is writable.
85 """
86 default, method, getter, setter = ext
87 # Extension is writable if it has a setter (getter + setter), if it has a
88 # default value (or, if its default value is none, none of the other values
89 # should be set).
90 if setter is not None or default is not None or all(e is None for e in ext):
91 return True
92 return False
93
[end of spacy/tokens/underscore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py
--- a/spacy/tokens/underscore.py
+++ b/spacy/tokens/underscore.py
@@ -2,11 +2,13 @@
from __future__ import unicode_literals
import functools
+import copy
from ..errors import Errors
class Underscore(object):
+ mutable_types = (dict, list, set)
doc_extensions = {}
span_extensions = {}
token_extensions = {}
@@ -32,7 +34,15 @@
elif method is not None:
return functools.partial(method, self._obj)
else:
- return self._doc.user_data.get(self._get_key(name), default)
+ key = self._get_key(name)
+ if key in self._doc.user_data:
+ return self._doc.user_data[key]
+ elif isinstance(default, self.mutable_types):
+ # Handle mutable default arguments (see #2581)
+ new_default = copy.copy(default)
+ self.__setattr__(name, new_default)
+ return new_default
+ return default
def __setattr__(self, name, value):
if name not in self._extensions:
| {"golden_diff": "diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py\n--- a/spacy/tokens/underscore.py\n+++ b/spacy/tokens/underscore.py\n@@ -2,11 +2,13 @@\n from __future__ import unicode_literals\n \n import functools\n+import copy\n \n from ..errors import Errors\n \n \n class Underscore(object):\n+ mutable_types = (dict, list, set)\n doc_extensions = {}\n span_extensions = {}\n token_extensions = {}\n@@ -32,7 +34,15 @@\n elif method is not None:\n return functools.partial(method, self._obj)\n else:\n- return self._doc.user_data.get(self._get_key(name), default)\n+ key = self._get_key(name)\n+ if key in self._doc.user_data:\n+ return self._doc.user_data[key]\n+ elif isinstance(default, self.mutable_types):\n+ # Handle mutable default arguments (see #2581)\n+ new_default = copy.copy(default)\n+ self.__setattr__(name, new_default)\n+ return new_default\n+ return default\n \n def __setattr__(self, name, value):\n if name not in self._extensions:\n", "issue": "[feature request] Factory default for extension attributes\n## Feature description\r\nAs usual with Python, mutable defaults are a big no-no for extension attributes, since they are shared by all instances, which leads to subtle and ~~quick to anger~~ hard to root out bugs (see e.g. #2581).\r\nThe documentation mentions that pitfall, but doesn't offer a convenient solution: if I want to keep a static list of interesting spans in my document in a `Doc._.interesting`, it is not clear where the getter and setter that I am supposed to use for this property should store the state. (From what I understand, it should probably be somewhere in `Doc.user_data`, but I have not found a lot of doc on that either)\r\n\r\nI propose a `factory` argument to `set_extension` that would be called the first time that the value for the corresponding extension property is retrieved for a given instance (as `collections.defaultdict` does), so one could just write\r\n\r\n```python\r\nspacy.tokens.Doc.set_extension('interesting', factory=list)\r\n```\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport functools\n\nfrom ..errors import Errors\n\n\nclass Underscore(object):\n doc_extensions = {}\n span_extensions = {}\n token_extensions = {}\n\n def __init__(self, extensions, obj, start=None, end=None):\n object.__setattr__(self, \"_extensions\", extensions)\n object.__setattr__(self, \"_obj\", obj)\n # Assumption is that for doc values, _start and _end will both be None\n # Span will set non-None values for _start and _end\n # Token will have _start be non-None, _end be None\n # This lets us key everything into the doc.user_data dictionary,\n # (see _get_key), and lets us use a single Underscore class.\n object.__setattr__(self, \"_doc\", obj.doc)\n object.__setattr__(self, \"_start\", start)\n object.__setattr__(self, \"_end\", end)\n\n def __getattr__(self, name):\n if name not in self._extensions:\n raise AttributeError(Errors.E046.format(name=name))\n default, method, getter, setter = self._extensions[name]\n if getter is not None:\n return getter(self._obj)\n elif method is not None:\n return functools.partial(method, self._obj)\n else:\n return self._doc.user_data.get(self._get_key(name), default)\n\n def __setattr__(self, name, value):\n if name not in self._extensions:\n raise AttributeError(Errors.E047.format(name=name))\n default, method, getter, setter = self._extensions[name]\n if setter is not None:\n return setter(self._obj, value)\n else:\n self._doc.user_data[self._get_key(name)] 
= value\n\n def set(self, name, value):\n return self.__setattr__(name, value)\n\n def get(self, name):\n return self.__getattr__(name)\n\n def has(self, name):\n return name in self._extensions\n\n def _get_key(self, name):\n return (\"._.\", name, self._start, self._end)\n\n\ndef get_ext_args(**kwargs):\n \"\"\"Validate and convert arguments. Reused in Doc, Token and Span.\"\"\"\n default = kwargs.get(\"default\")\n getter = kwargs.get(\"getter\")\n setter = kwargs.get(\"setter\")\n method = kwargs.get(\"method\")\n if getter is None and setter is not None:\n raise ValueError(Errors.E089)\n valid_opts = (\"default\" in kwargs, method is not None, getter is not None)\n nr_defined = sum(t is True for t in valid_opts)\n if nr_defined != 1:\n raise ValueError(Errors.E083.format(nr_defined=nr_defined))\n if setter is not None and not hasattr(setter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"setter\", value=repr(setter)))\n if getter is not None and not hasattr(getter, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"getter\", value=repr(getter)))\n if method is not None and not hasattr(method, \"__call__\"):\n raise ValueError(Errors.E091.format(name=\"method\", value=repr(method)))\n return (default, method, getter, setter)\n\n\ndef is_writable_attr(ext):\n \"\"\"Check if an extension attribute is writable.\n ext (tuple): The (default, getter, setter, method) tuple available via\n {Doc,Span,Token}.get_extension.\n RETURNS (bool): Whether the attribute is writable.\n \"\"\"\n default, method, getter, setter = ext\n # Extension is writable if it has a setter (getter + setter), if it has a\n # default value (or, if its default value is none, none of the other values\n # should be set).\n if setter is not None or default is not None or all(e is None for e in ext):\n return True\n return False\n", "path": "spacy/tokens/underscore.py"}]} | 1,806 | 265 |
gh_patches_debug_33005 | rasdani/github-patches | git_diff | weecology__retriever-378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
v1.6.0 will break if download scripts are added to version.txt in master
We have now done this twice (see #180 and #199).
In v1.6.0 `__init__.py` line 16: `MASTER = True`. This results in the retriever always checking `master` for `version.txt` and discovering scripts that it doesn't know how to handle. In the future, the retriever will handle this gracefully thanks to #204, but it's unclear how we should go about introducing the download only functionality since it will break a number of existing installations.
</issue>
<code>
[start of scripts/MammalSuperTree.py]
1 #retriever
2 from retriever.lib.templates import DownloadOnlyTemplate
3
4 SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
5 shortname='mammsupertree',
6 ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
7 citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
8 description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
9 urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
10
[end of scripts/MammalSuperTree.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/MammalSuperTree.py b/scripts/MammalSuperTree.py
--- a/scripts/MammalSuperTree.py
+++ b/scripts/MammalSuperTree.py
@@ -1,9 +1,22 @@
#retriever
-from retriever.lib.templates import DownloadOnlyTemplate
-
-SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
- shortname='mammsupertree',
- ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
- citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
- description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
- urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
+from retriever import VERSION
+if (VERSION == 'v1.6') or (VERSION == 'v1.6.0'):
+ #If v1.6 is running use a dummy script to avoid retriever errors
+ #See https://github.com/weecology/retriever/issues/208 for details
+ from retriever.lib.templates import Script
+ class main(Script):
+ def __init(self):
+ Script.__init__(self,
+ name="Mammal Super Tree",
+ shortname='mammsupertree',
+ )
+ SCRIPT = main()
+else:
+ #For all versions other than 1.6 run as normal
+ from retriever.lib.templates import DownloadOnlyTemplate
+ SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
+ shortname='mammsupertree',
+ ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
+ citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x",
+ description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
+ urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
| {"golden_diff": "diff --git a/scripts/MammalSuperTree.py b/scripts/MammalSuperTree.py\n--- a/scripts/MammalSuperTree.py\n+++ b/scripts/MammalSuperTree.py\n@@ -1,9 +1,22 @@\n #retriever\n-from retriever.lib.templates import DownloadOnlyTemplate\n-\n-SCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n- shortname='mammsupertree',\n- ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n- citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x\",\n- description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n- urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n+from retriever import VERSION\n+if (VERSION == 'v1.6') or (VERSION == 'v1.6.0'):\n+ #If v1.6 is running use a dummy script to avoid retriever errors\n+ #See https://github.com/weecology/retriever/issues/208 for details\n+ from retriever.lib.templates import Script\n+ class main(Script):\n+ def __init(self):\n+ Script.__init__(self,\n+ name=\"Mammal Super Tree\",\n+ shortname='mammsupertree',\n+ )\n+ SCRIPT = main()\n+else:\n+ #For all versions other than 1.6 run as normal\n+ from retriever.lib.templates import DownloadOnlyTemplate\n+ SCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n+ shortname='mammsupertree',\n+ ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n+ citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x\",\n+ description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n+ urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n", "issue": "v1.6.0 will break if download scripts are added to version.txt in master\nWe have now done this twice (see #180 and #199).\n\nIn v1.6.0 `__init__.py` line 16: `MASTER = True`. This results in the retriever always checking `master` for `version.txt` and discovering scripts that it doesn't know how to handle. In the future, the retriever will handle this gracefully thanks to #204, but it's unclear how we should go about introducing the download only functionality since it will break a number of existing installations.\n\n", "before_files": [{"content": "#retriever\nfrom retriever.lib.templates import DownloadOnlyTemplate\n\nSCRIPT = DownloadOnlyTemplate(name=\"Mammal Super Tree\",\n shortname='mammsupertree',\n ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',\n citation = \"Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. 
doi:10.1111/j.1461-0248.2009.01307.x\",\n description=\"Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549\",\n urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})\n", "path": "scripts/MammalSuperTree.py"}]} | 1,054 | 960 |
gh_patches_debug_5038 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyhf contrib download fails gracelessly with invalid URL
# Description
calling `pyhf contrib download` (just to see what it does) fails pretty violently. Should we make it a bit nicer, @matthewfeickert?
```
pyhf contrib download
Traceback (most recent call last):
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/bin/pyhf", line 33, in <module>
sys.exit(load_entry_point('pyhf', 'console_scripts', 'pyhf')())
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/cli/contrib.py", line 60, in download
utils.download(archive_url, output_directory, force, compress)
File "/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/contrib/utils.py", line 47, in download
+ "To download an archive from this host use the --force option."
pyhf.exceptions.InvalidArchiveHost: is not an approved archive host: www.hepdata.net, doi.org
To download an archive from this host use the --force option.
```
</issue>
<code>
[start of src/pyhf/cli/contrib.py]
1 """CLI for functionality that will get migrated out eventually."""
2 import logging
3 import click
4 from pathlib import Path
5
6 from ..contrib import utils
7
8 logging.basicConfig()
9 log = logging.getLogger(__name__)
10
11
12 @click.group(name="contrib")
13 def cli():
14 """
15 Contrib experimental operations.
16
17 .. note::
18
19 Requires installation of the ``contrib`` extra.
20
21 .. code-block:: shell
22
23 $ python -m pip install pyhf[contrib]
24 """
25
26
27 @cli.command()
28 @click.argument("archive-url", default="-")
29 @click.argument("output-directory", default="-")
30 @click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
31 @click.option(
32 "-f", "--force", is_flag=True, help="Force download from non-approved host"
33 )
34 @click.option(
35 "-c",
36 "--compress",
37 is_flag=True,
38 help="Keep the archive in a compressed tar.gz form",
39 )
40 def download(archive_url, output_directory, verbose, force, compress):
41 """
42 Download the patchset archive from the remote URL and extract it in a
43 directory at the path given.
44
45 Example:
46
47 .. code-block:: shell
48
49 $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods
50
51 \b
52 1Lbb-likelihoods/patchset.json
53 1Lbb-likelihoods/README.md
54 1Lbb-likelihoods/BkgOnly.json
55
56 Raises:
57 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
58 """
59 try:
60 utils.download(archive_url, output_directory, force, compress)
61
62 if verbose:
63 file_list = [str(file) for file in list(Path(output_directory).glob("*"))]
64 print("\n".join(file_list))
65 except AttributeError as excep:
66 exception_info = (
67 str(excep)
68 + "\nInstallation of the contrib extra is required to use the contrib CLI API"
69 + "\nPlease install with: python -m pip install pyhf[contrib]\n"
70 )
71 log.error(exception_info)
72
[end of src/pyhf/cli/contrib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/cli/contrib.py b/src/pyhf/cli/contrib.py
--- a/src/pyhf/cli/contrib.py
+++ b/src/pyhf/cli/contrib.py
@@ -25,8 +25,8 @@
@cli.command()
[email protected]("archive-url", default="-")
[email protected]("output-directory", default="-")
[email protected]("archive-url")
[email protected]("output-directory")
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
@click.option(
"-f", "--force", is_flag=True, help="Force download from non-approved host"
| {"golden_diff": "diff --git a/src/pyhf/cli/contrib.py b/src/pyhf/cli/contrib.py\n--- a/src/pyhf/cli/contrib.py\n+++ b/src/pyhf/cli/contrib.py\n@@ -25,8 +25,8 @@\n \n \n @cli.command()\[email protected](\"archive-url\", default=\"-\")\[email protected](\"output-directory\", default=\"-\")\[email protected](\"archive-url\")\[email protected](\"output-directory\")\n @click.option(\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\n @click.option(\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n", "issue": "pyhf contrib download fails gracelessly with invalid URL\n# Description\r\n\r\ncalling `pyhf contrib download` (just to see what it does) fails pretty violently. hould we make itt a bit nicer @matthewfeickert \r\n?\r\n\r\n```\r\npyhf contrib download\r\nTraceback (most recent call last):\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/bin/pyhf\", line 33, in <module>\r\n sys.exit(load_entry_point('pyhf', 'console_scripts', 'pyhf')())\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfdevenv/lib/python3.7/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/cli/contrib.py\", line 60, in download\r\n utils.download(archive_url, output_directory, force, compress)\r\n File \"/Users/lukasheinrich/Code/pyhfdev/dev/pyhfsrc/src/pyhf/contrib/utils.py\", line 47, in download\r\n + \"To download an archive from this host use the --force option.\"\r\npyhf.exceptions.InvalidArchiveHost: is not an approved archive host: www.hepdata.net, doi.org\r\nTo download an archive from this host use the --force option.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom ..contrib import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. 
code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\", default=\"-\")\[email protected](\"output-directory\", default=\"-\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/cli/contrib.py"}]} | 1,726 | 140 |
gh_patches_debug_11587 | rasdani/github-patches | git_diff | saulpw__visidata-967 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
no loader for url scheme: postgresql
**Small description**
When attempting to start visidata with
```
vd postgresql:///localdb
```
it fails with
```
Error: no loader for url scheme: postgresql
```
**Expected result**
I would have expected it to work the same way
```
vd postgres:///localdb
```
works, as [both URL schemes are valid](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
P.S.: This is somewhere in between a bug and a feature request. As it's super small and about something existing, I called it a bug. I will raise a PR to fix this shortly.
</issue>
<code>
[start of visidata/loaders/postgres.py]
1 from visidata import *
2
3 __all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']
4
5 option('postgres_schema', 'public', 'The desired schema for the Postgres database')
6
7 def codeToType(type_code, colname):
8 import psycopg2
9 try:
10 tname = psycopg2._psycopg.string_types[type_code].name
11 if 'INTEGER' in tname:
12 return int
13 if 'STRING' in tname:
14 return str
15 except KeyError:
16 vd.status('unknown postgres type_code %s for %s' % (type_code, colname))
17 return anytype
18
19
20 def openurl_rds(url, filetype=None):
21 import boto3
22 import psycopg2
23
24 rds = boto3.client('rds')
25 url = urlparse(url.given)
26
27 _, region, dbname = url.path.split('/')
28 token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)
29
30 conn = psycopg2.connect(
31 user=url.username,
32 dbname=dbname,
33 host=url.hostname,
34 port=url.port,
35 password=token)
36
37 return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
38
39
40 def openurl_postgres(url, filetype=None):
41 import psycopg2
42
43 url = urlparse(url.given)
44 dbname = url.path[1:]
45 conn = psycopg2.connect(
46 user=url.username,
47 dbname=dbname,
48 host=url.hostname,
49 port=url.port,
50 password=url.password)
51
52 return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
53
54
55 class SQL:
56 def __init__(self, conn):
57 self.conn = conn
58
59 def cur(self, qstr):
60 import string
61 randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
62 cur = self.conn.cursor(randomname)
63 cur.execute(qstr)
64 return cur
65
66 @asyncthread
67 def query_async(self, qstr, callback=None):
68 with self.cur(qstr) as cur:
69 callback(cur)
70 cur.close()
71
72
73 def cursorToColumns(cur, sheet):
74 sheet.columns = []
75 for i, coldesc in enumerate(cur.description):
76 sheet.addColumn(ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name)))
77
78
79 # rowdef: (table_name, ncols)
80 class PgTablesSheet(Sheet):
81 rowtype = 'tables'
82
83 def reload(self):
84 schema = options.postgres_schema
85 qstr = f'''
86 SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows
87 FROM pg_class, pg_namespace, (
88 SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = '{schema}' GROUP BY table_name
89 ) AS column_count
90 WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;
91 '''
92
93 with self.sql.cur(qstr) as cur:
94 self.nrowsPerTable = {}
95
96 self.rows = []
97 # try to get first row to make cur.description available
98 r = cur.fetchone()
99 if r:
100 self.addRow(r)
101 cursorToColumns(cur, self)
102 self.setKeys(self.columns[0:1]) # table_name is the key
103
104 for r in cur:
105 self.addRow(r)
106
107 def openRow(self, row):
108 return PgTable(self.name+"."+row[0], source=row[0], sql=self.sql)
109
110
111 # rowdef: tuple of values as returned by fetchone()
112 class PgTable(Sheet):
113 @asyncthread
114 def reload(self):
115 if self.options.postgres_schema:
116 source = f"{self.options.postgres_schema}.{self.source}"
117 else:
118 source = self.source
119 with self.sql.cur(f"SELECT * FROM {source}") as cur:
120 self.rows = []
121 r = cur.fetchone()
122 if r:
123 self.addRow(r)
124 cursorToColumns(cur, self)
125 for r in cur:
126 self.addRow(r)
127
[end of visidata/loaders/postgres.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/visidata/loaders/postgres.py b/visidata/loaders/postgres.py
--- a/visidata/loaders/postgres.py
+++ b/visidata/loaders/postgres.py
@@ -1,6 +1,6 @@
from visidata import *
-__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']
+__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']
option('postgres_schema', 'public', 'The desired schema for the Postgres database')
@@ -52,6 +52,9 @@
return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
+openurl_postgresql=openurl_postgres
+
+
class SQL:
def __init__(self, conn):
self.conn = conn
| {"golden_diff": "diff --git a/visidata/loaders/postgres.py b/visidata/loaders/postgres.py\n--- a/visidata/loaders/postgres.py\n+++ b/visidata/loaders/postgres.py\n@@ -1,6 +1,6 @@\n from visidata import *\n \n-__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n+__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n \n option('postgres_schema', 'public', 'The desired schema for the Postgres database')\n \n@@ -52,6 +52,9 @@\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n \n \n+openurl_postgresql=openurl_postgres\n+\n+\n class SQL:\n def __init__(self, conn):\n self.conn = conn\n", "issue": "no loader for url scheme: postgresql\n**Small description**\r\n\r\nWhen attempting to start visidata with\r\n```\r\nvd postgresql:///localdb\r\n```\r\nit fails with\r\n```\r\nError: no loader for url scheme: postgresql\r\n```\r\n\r\n**Expected result**\r\n\r\nI would have expected it to work the same way \r\n```\r\nvd postgres:///localdb\r\n```\r\nworks, as [both URL schemes are valid](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).\r\n\r\nP.S.: This is somewhere in between a bug and a feature request. As it's super small and about something existing, I called it a bug. I will raise a PR to fix this shortly.\n", "before_files": [{"content": "from visidata import *\n\n__all__ = ['openurl_postgres', 'openurl_rds', 'PgTable', 'PgTablesSheet']\n\noption('postgres_schema', 'public', 'The desired schema for the Postgres database')\n\ndef codeToType(type_code, colname):\n import psycopg2\n try:\n tname = psycopg2._psycopg.string_types[type_code].name\n if 'INTEGER' in tname:\n return int\n if 'STRING' in tname:\n return str\n except KeyError:\n vd.status('unknown postgres type_code %s for %s' % (type_code, colname))\n return anytype\n\n\ndef openurl_rds(url, filetype=None):\n import boto3\n import psycopg2\n\n rds = boto3.client('rds')\n url = urlparse(url.given)\n\n _, region, dbname = url.path.split('/')\n token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)\n\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=token)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\ndef openurl_postgres(url, filetype=None):\n import psycopg2\n\n url = urlparse(url.given)\n dbname = url.path[1:]\n conn = psycopg2.connect(\n user=url.username,\n dbname=dbname,\n host=url.hostname,\n port=url.port,\n password=url.password)\n\n return PgTablesSheet(dbname+\"_tables\", sql=SQL(conn))\n\n\nclass SQL:\n def __init__(self, conn):\n self.conn = conn\n\n def cur(self, qstr):\n import string\n randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))\n cur = self.conn.cursor(randomname)\n cur.execute(qstr)\n return cur\n\n @asyncthread\n def query_async(self, qstr, callback=None):\n with self.cur(qstr) as cur:\n callback(cur)\n cur.close()\n\n\ndef cursorToColumns(cur, sheet):\n sheet.columns = []\n for i, coldesc in enumerate(cur.description):\n sheet.addColumn(ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name)))\n\n\n# rowdef: (table_name, ncols)\nclass PgTablesSheet(Sheet):\n rowtype = 'tables'\n\n def reload(self):\n schema = options.postgres_schema\n qstr = f'''\n SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows\n FROM pg_class, pg_namespace, (\n SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = 
'{schema}' GROUP BY table_name\n ) AS column_count\n WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;\n '''\n\n with self.sql.cur(qstr) as cur:\n self.nrowsPerTable = {}\n\n self.rows = []\n # try to get first row to make cur.description available\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n self.setKeys(self.columns[0:1]) # table_name is the key\n\n for r in cur:\n self.addRow(r)\n\n def openRow(self, row):\n return PgTable(self.name+\".\"+row[0], source=row[0], sql=self.sql)\n\n\n# rowdef: tuple of values as returned by fetchone()\nclass PgTable(Sheet):\n @asyncthread\n def reload(self):\n if self.options.postgres_schema:\n source = f\"{self.options.postgres_schema}.{self.source}\"\n else:\n source = self.source\n with self.sql.cur(f\"SELECT * FROM {source}\") as cur:\n self.rows = []\n r = cur.fetchone()\n if r:\n self.addRow(r)\n cursorToColumns(cur, self)\n for r in cur:\n self.addRow(r)\n", "path": "visidata/loaders/postgres.py"}]} | 1,857 | 197 |
gh_patches_debug_37490 | rasdani/github-patches | git_diff | getsentry__sentry-62640 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Convert digest notifications to use block kit
Convert the `DigestNotificationMessageBuilder` (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/digest.py)) to use block kit. This may be harder to test as I have personally never received one.
</issue>
<code>
[start of src/sentry/integrations/slack/message_builder/notifications/digest.py]
1 from __future__ import annotations
2
3 from typing import Any, Mapping
4
5 from sentry.digests import Digest
6 from sentry.digests.utils import get_groups
7 from sentry.integrations.slack.message_builder import SlackBody
8 from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
9 from sentry.notifications.notifications.digest import DigestNotification
10 from sentry.services.hybrid_cloud.actor import RpcActor
11
12 from .base import SlackNotificationsMessageBuilder
13
14
15 class DigestNotificationMessageBuilder(SlackNotificationsMessageBuilder):
16 def __init__(
17 self,
18 notification: DigestNotification,
19 context: Mapping[str, Any],
20 recipient: RpcActor,
21 ) -> None:
22 super().__init__(notification, context, recipient)
23 self.notification: DigestNotification = notification
24
25 def build(self) -> SlackBody:
26 """
27 It's currently impossible in mypy to have recursive types so we need a
28 hack to get this to return a SlackBody.
29 """
30 digest: Digest = self.context.get("digest", {})
31 return [
32 SlackIssuesMessageBuilder(
33 group=group,
34 event=event,
35 rules=[rule],
36 issue_details=True,
37 notification=self.notification,
38 recipient=self.recipient,
39 ).build()
40 for rule, group, event in get_groups(digest)
41 ]
42
[end of src/sentry/integrations/slack/message_builder/notifications/digest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/integrations/slack/message_builder/notifications/digest.py b/src/sentry/integrations/slack/message_builder/notifications/digest.py
--- a/src/sentry/integrations/slack/message_builder/notifications/digest.py
+++ b/src/sentry/integrations/slack/message_builder/notifications/digest.py
@@ -2,9 +2,10 @@
from typing import Any, Mapping
+from sentry import features
from sentry.digests import Digest
from sentry.digests.utils import get_groups
-from sentry.integrations.slack.message_builder import SlackBody
+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock
from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder
from sentry.notifications.notifications.digest import DigestNotification
from sentry.services.hybrid_cloud.actor import RpcActor
@@ -22,14 +23,28 @@
super().__init__(notification, context, recipient)
self.notification: DigestNotification = notification
- def build(self) -> SlackBody:
+ def build(self) -> SlackAttachment | SlackBlock:
"""
It's currently impossible in mypy to have recursive types so we need a
hack to get this to return a SlackBody.
"""
digest: Digest = self.context.get("digest", {})
- return [
- SlackIssuesMessageBuilder(
+ digest_groups = get_groups(digest)
+ if not features.has("organizations:slack-block-kit", self.notification.organization):
+ return [
+ SlackIssuesMessageBuilder(
+ group=group,
+ event=event,
+ rules=[rule],
+ issue_details=True,
+ notification=self.notification,
+ recipient=self.recipient,
+ ).build()
+ for rule, group, event in digest_groups
+ ]
+ blocks = []
+ for rule, group, event in digest_groups:
+ alert_as_blocks = SlackIssuesMessageBuilder(
group=group,
event=event,
rules=[rule],
@@ -37,5 +52,8 @@
notification=self.notification,
recipient=self.recipient,
).build()
- for rule, group, event in get_groups(digest)
- ]
+ # we iterate through the list of blocks created for each alert in the digest and add
+ # each block to the list of blocks which is used for the entire digest notification
+ for block in alert_as_blocks.get("blocks"):
+ blocks.append(block)
+ return self._build_blocks(*blocks)
| {"golden_diff": "diff --git a/src/sentry/integrations/slack/message_builder/notifications/digest.py b/src/sentry/integrations/slack/message_builder/notifications/digest.py\n--- a/src/sentry/integrations/slack/message_builder/notifications/digest.py\n+++ b/src/sentry/integrations/slack/message_builder/notifications/digest.py\n@@ -2,9 +2,10 @@\n \n from typing import Any, Mapping\n \n+from sentry import features\n from sentry.digests import Digest\n from sentry.digests.utils import get_groups\n-from sentry.integrations.slack.message_builder import SlackBody\n+from sentry.integrations.slack.message_builder import SlackAttachment, SlackBlock\n from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\n from sentry.notifications.notifications.digest import DigestNotification\n from sentry.services.hybrid_cloud.actor import RpcActor\n@@ -22,14 +23,28 @@\n super().__init__(notification, context, recipient)\n self.notification: DigestNotification = notification\n \n- def build(self) -> SlackBody:\n+ def build(self) -> SlackAttachment | SlackBlock:\n \"\"\"\n It's currently impossible in mypy to have recursive types so we need a\n hack to get this to return a SlackBody.\n \"\"\"\n digest: Digest = self.context.get(\"digest\", {})\n- return [\n- SlackIssuesMessageBuilder(\n+ digest_groups = get_groups(digest)\n+ if not features.has(\"organizations:slack-block-kit\", self.notification.organization):\n+ return [\n+ SlackIssuesMessageBuilder(\n+ group=group,\n+ event=event,\n+ rules=[rule],\n+ issue_details=True,\n+ notification=self.notification,\n+ recipient=self.recipient,\n+ ).build()\n+ for rule, group, event in digest_groups\n+ ]\n+ blocks = []\n+ for rule, group, event in digest_groups:\n+ alert_as_blocks = SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n@@ -37,5 +52,8 @@\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n- for rule, group, event in get_groups(digest)\n- ]\n+ # we iterate through the list of blocks created for each alert in the digest and add\n+ # each block to the list of blocks which is used for the entire digest notification\n+ for block in alert_as_blocks.get(\"blocks\"):\n+ blocks.append(block)\n+ return self._build_blocks(*blocks)\n", "issue": "Convert digest notifications to use block kit\nConvert the `DigestNotificationMessageBuilder` (code [here](https://github.com/getsentry/sentry/blob/master/src/sentry/integrations/slack/message_builder/notifications/digest.py)) to use block kit. 
This may be harder to test as I have personally never received one.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping\n\nfrom sentry.digests import Digest\nfrom sentry.digests.utils import get_groups\nfrom sentry.integrations.slack.message_builder import SlackBody\nfrom sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder\nfrom sentry.notifications.notifications.digest import DigestNotification\nfrom sentry.services.hybrid_cloud.actor import RpcActor\n\nfrom .base import SlackNotificationsMessageBuilder\n\n\nclass DigestNotificationMessageBuilder(SlackNotificationsMessageBuilder):\n def __init__(\n self,\n notification: DigestNotification,\n context: Mapping[str, Any],\n recipient: RpcActor,\n ) -> None:\n super().__init__(notification, context, recipient)\n self.notification: DigestNotification = notification\n\n def build(self) -> SlackBody:\n \"\"\"\n It's currently impossible in mypy to have recursive types so we need a\n hack to get this to return a SlackBody.\n \"\"\"\n digest: Digest = self.context.get(\"digest\", {})\n return [\n SlackIssuesMessageBuilder(\n group=group,\n event=event,\n rules=[rule],\n issue_details=True,\n notification=self.notification,\n recipient=self.recipient,\n ).build()\n for rule, group, event in get_groups(digest)\n ]\n", "path": "src/sentry/integrations/slack/message_builder/notifications/digest.py"}]} | 975 | 548 |
gh_patches_debug_14648 | rasdani/github-patches | git_diff | Kinto__kinto-1003 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GET on /v1/admin returns 404
The expected address for built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.
Related to #112 and #858.
```
> http get localhost:8888/v1/admin/
HTTP/1.1 200 OK
(...)
> http get localhost:8888/v1/admin
HTTP/1.1 404 Not Found
(...)
```
</issue>
<code>
[start of kinto/plugins/admin/__init__.py]
1 from pyramid.static import static_view
2
3
4 def includeme(config):
5 # Process settings to remove storage wording.
6
7 # Expose capability.
8 config.add_api_capability(
9 "admin",
10 version="1.6.0",
11 description="Serves the admin console.",
12 url="https://github.com/Kinto/kinto-admin/",
13 )
14
15 build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)
16 config.add_route('catchall_static', '/admin/*subpath')
17 config.add_view(build_dir, route_name="catchall_static")
18
[end of kinto/plugins/admin/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py
--- a/kinto/plugins/admin/__init__.py
+++ b/kinto/plugins/admin/__init__.py
@@ -1,4 +1,5 @@
from pyramid.static import static_view
+from pyramid.httpexceptions import HTTPTemporaryRedirect
def includeme(config):
@@ -15,3 +16,10 @@
build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)
config.add_route('catchall_static', '/admin/*subpath')
config.add_view(build_dir, route_name="catchall_static")
+
+ # Setup redirect without trailing slash.
+ def admin_redirect_view(request):
+ raise HTTPTemporaryRedirect(request.path + '/')
+
+ config.add_route('admin_redirect', '/admin')
+ config.add_view(admin_redirect_view, route_name="admin_redirect")
| {"golden_diff": "diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py\n--- a/kinto/plugins/admin/__init__.py\n+++ b/kinto/plugins/admin/__init__.py\n@@ -1,4 +1,5 @@\n from pyramid.static import static_view\n+from pyramid.httpexceptions import HTTPTemporaryRedirect\n \n \n def includeme(config):\n@@ -15,3 +16,10 @@\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n+\n+ # Setup redirect without trailing slash.\n+ def admin_redirect_view(request):\n+ raise HTTPTemporaryRedirect(request.path + '/')\n+\n+ config.add_route('admin_redirect', '/admin')\n+ config.add_view(admin_redirect_view, route_name=\"admin_redirect\")\n", "issue": "GET on /v1/admin returns 404\nThe expected address for built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.\r\n\r\nRelated to #112 and #858.\r\n\r\n```\r\n> http get localhost:8888/v1/admin/\r\nHTTP/1.1 200 OK\r\n(...)\r\n\r\n> http get localhost:8888/v1/admin\r\nHTTP/1.1 404 Not Found\r\n(...)\r\n```\r\n\r\n\nGET on /v1/admin returns 404\nThe expected address for built-in admin plugin is `/v1/admin/`. But if you forget the trailing slash and type `/v1/admin` you get a 404. I think it would be better to raise a 307.\r\n\r\nRelated to #112 and #858.\r\n\r\n```\r\n> http get localhost:8888/v1/admin/\r\nHTTP/1.1 200 OK\r\n(...)\r\n\r\n> http get localhost:8888/v1/admin\r\nHTTP/1.1 404 Not Found\r\n(...)\r\n```\r\n\r\n\n", "before_files": [{"content": "from pyramid.static import static_view\n\n\ndef includeme(config):\n # Process settings to remove storage wording.\n\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n version=\"1.6.0\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n", "path": "kinto/plugins/admin/__init__.py"}]} | 958 | 199 |
gh_patches_debug_61588 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document negative `sigma` values in `filters.gaussian_filter` are clipped to zero.
Negative sigma values have no effect; they are clipped to zero. This should be documented.
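A minimal sketch of how the behaviour could be made explicit instead of silently clipping (the helper name is illustrative of what a validation guard in `gaussian_filter` could look like):
```python
import numpy as np

def _check_sigma(sigma):
    # Fail loudly instead of silently treating negative values as zero.
    if np.any(np.asarray(sigma) < 0.0):
        raise ValueError("Sigma values less than zero are not valid")
```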
</issue>
<code>
[start of skimage/filters/_gaussian.py]
1 import collections as coll
2 import numpy as np
3 from scipy import ndimage as ndi
4 import warnings
5
6 from ..util import img_as_float
7 from ..color import guess_spatial_dimensions
8
9 __all__ = ['gaussian_filter']
10
11
12 def gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,
13 multichannel=None):
14 """Multi-dimensional Gaussian filter
15
16 Parameters
17 ----------
18 image : array-like
19 input image (grayscale or color) to filter.
20 sigma : scalar or sequence of scalars
21 standard deviation for Gaussian kernel. The standard
22 deviations of the Gaussian filter are given for each axis as a
23 sequence, or as a single number, in which case it is equal for
24 all axes.
25 output : array, optional
26 The ``output`` parameter passes an array in which to store the
27 filter output.
28 mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
29 The `mode` parameter determines how the array borders are
30 handled, where `cval` is the value when mode is equal to
31 'constant'. Default is 'nearest'.
32 cval : scalar, optional
33 Value to fill past edges of input if `mode` is 'constant'. Default
34 is 0.0
35 multichannel : bool, optional (default: None)
36 Whether the last axis of the image is to be interpreted as multiple
37 channels. If True, each channel is filtered separately (channels are
38 not mixed together). Only 3 channels are supported. If `None`,
39 the function will attempt to guess this, and raise a warning if
40 ambiguous, when the array has shape (M, N, 3).
41
42 Returns
43 -------
44 filtered_image : ndarray
45 the filtered array
46
47 Notes
48 -----
49 This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.
50
51 Integer arrays are converted to float.
52
53 The multi-dimensional filter is implemented as a sequence of
54 one-dimensional convolution filters. The intermediate arrays are
55 stored in the same data type as the output. Therefore, for output
56 types with a limited precision, the results may be imprecise
57 because intermediate results may be stored with insufficient
58 precision.
59
60 Examples
61 --------
62
63 >>> a = np.zeros((3, 3))
64 >>> a[1, 1] = 1
65 >>> a
66 array([[ 0., 0., 0.],
67 [ 0., 1., 0.],
68 [ 0., 0., 0.]])
69 >>> gaussian_filter(a, sigma=0.4) # mild smoothing
70 array([[ 0.00163116, 0.03712502, 0.00163116],
71 [ 0.03712502, 0.84496158, 0.03712502],
72 [ 0.00163116, 0.03712502, 0.00163116]])
73 >>> gaussian_filter(a, sigma=1) # more smooting
74 array([[ 0.05855018, 0.09653293, 0.05855018],
75 [ 0.09653293, 0.15915589, 0.09653293],
76 [ 0.05855018, 0.09653293, 0.05855018]])
77 >>> # Several modes are possible for handling boundaries
78 >>> gaussian_filter(a, sigma=1, mode='reflect')
79 array([[ 0.08767308, 0.12075024, 0.08767308],
80 [ 0.12075024, 0.16630671, 0.12075024],
81 [ 0.08767308, 0.12075024, 0.08767308]])
82 >>> # For RGB images, each is filtered separately
83 >>> from skimage.data import astronaut
84 >>> image = astronaut()
85 >>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)
86
87 """
88
89 spatial_dims = guess_spatial_dimensions(image)
90 if spatial_dims is None and multichannel is None:
91 msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
92 "by default. Use `multichannel=False` to interpret as "
93 "3D image with last dimension of length 3.")
94 warnings.warn(RuntimeWarning(msg))
95 multichannel = True
96 if multichannel:
97 # do not filter across channels
98 if not isinstance(sigma, coll.Iterable):
99 sigma = [sigma] * (image.ndim - 1)
100 if len(sigma) != image.ndim:
101 sigma = np.concatenate((np.asarray(sigma), [0]))
102 image = img_as_float(image)
103 return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)
104
[end of skimage/filters/_gaussian.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py
--- a/skimage/filters/_gaussian.py
+++ b/skimage/filters/_gaussian.py
@@ -93,6 +93,8 @@
"3D image with last dimension of length 3.")
warnings.warn(RuntimeWarning(msg))
multichannel = True
+ if np.any(np.asarray(sigma) < 0.0):
+ raise ValueError("Sigma values less than zero are not valid")
if multichannel:
# do not filter across channels
if not isinstance(sigma, coll.Iterable):
| {"golden_diff": "diff --git a/skimage/filters/_gaussian.py b/skimage/filters/_gaussian.py\n--- a/skimage/filters/_gaussian.py\n+++ b/skimage/filters/_gaussian.py\n@@ -93,6 +93,8 @@\n \"3D image with last dimension of length 3.\")\n warnings.warn(RuntimeWarning(msg))\n multichannel = True\n+ if np.any(np.asarray(sigma) < 0.0):\n+ raise ValueError(\"Sigma values less than zero are not valid\")\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n", "issue": "Document negative `sigma` values in `filters.gaussian_filter` are clipped to zero.\nNegative sigma values have no effect; they are clipped to zero. This should be documented.\n\n", "before_files": [{"content": "import collections as coll\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport warnings\n\nfrom ..util import img_as_float\nfrom ..color import guess_spatial_dimensions\n\n__all__ = ['gaussian_filter']\n\n\ndef gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,\n multichannel=None):\n \"\"\"Multi-dimensional Gaussian filter\n\n Parameters\n ----------\n image : array-like\n input image (grayscale or color) to filter.\n sigma : scalar or sequence of scalars\n standard deviation for Gaussian kernel. The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n output : array, optional\n The ``output`` parameter passes an array in which to store the\n filter output.\n mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the array borders are\n handled, where `cval` is the value when mode is equal to\n 'constant'. Default is 'nearest'.\n cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0\n multichannel : bool, optional (default: None)\n Whether the last axis of the image is to be interpreted as multiple\n channels. If True, each channel is filtered separately (channels are\n not mixed together). Only 3 channels are supported. If `None`,\n the function will attempt to guess this, and raise a warning if\n ambiguous, when the array has shape (M, N, 3).\n\n Returns\n -------\n filtered_image : ndarray\n the filtered array\n\n Notes\n -----\n This function is a wrapper around :func:`scipy.ndi.gaussian_filter`.\n\n Integer arrays are converted to float.\n\n The multi-dimensional filter is implemented as a sequence of\n one-dimensional convolution filters. The intermediate arrays are\n stored in the same data type as the output. 
Therefore, for output\n types with a limited precision, the results may be imprecise\n because intermediate results may be stored with insufficient\n precision.\n\n Examples\n --------\n\n >>> a = np.zeros((3, 3))\n >>> a[1, 1] = 1\n >>> a\n array([[ 0., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 0.]])\n >>> gaussian_filter(a, sigma=0.4) # mild smoothing\n array([[ 0.00163116, 0.03712502, 0.00163116],\n [ 0.03712502, 0.84496158, 0.03712502],\n [ 0.00163116, 0.03712502, 0.00163116]])\n >>> gaussian_filter(a, sigma=1) # more smooting\n array([[ 0.05855018, 0.09653293, 0.05855018],\n [ 0.09653293, 0.15915589, 0.09653293],\n [ 0.05855018, 0.09653293, 0.05855018]])\n >>> # Several modes are possible for handling boundaries\n >>> gaussian_filter(a, sigma=1, mode='reflect')\n array([[ 0.08767308, 0.12075024, 0.08767308],\n [ 0.12075024, 0.16630671, 0.12075024],\n [ 0.08767308, 0.12075024, 0.08767308]])\n >>> # For RGB images, each is filtered separately\n >>> from skimage.data import astronaut\n >>> image = astronaut()\n >>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)\n\n \"\"\"\n\n spatial_dims = guess_spatial_dimensions(image)\n if spatial_dims is None and multichannel is None:\n msg = (\"Images with dimensions (M, N, 3) are interpreted as 2D+RGB \"\n \"by default. Use `multichannel=False` to interpret as \"\n \"3D image with last dimension of length 3.\")\n warnings.warn(RuntimeWarning(msg))\n multichannel = True\n if multichannel:\n # do not filter across channels\n if not isinstance(sigma, coll.Iterable):\n sigma = [sigma] * (image.ndim - 1)\n if len(sigma) != image.ndim:\n sigma = np.concatenate((np.asarray(sigma), [0]))\n image = img_as_float(image)\n return ndi.gaussian_filter(image, sigma, mode=mode, cval=cval)\n", "path": "skimage/filters/_gaussian.py"}]} | 1,995 | 141 |
gh_patches_debug_27760 | rasdani/github-patches | git_diff | microsoft__playwright-python-191 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Internal docs are published on the gh pages instead of the official API
We should remove everything that is not relevant to the end user!
Docs: https://microsoft.github.io/playwright-python/sync_api.html
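Assuming the API docs are generated with pdoc, internal modules can be hidden through a module-level `__pdoc__` mapping; entries set to `False` are excluded from the output. A minimal sketch (the module names listed are only examples):
```python
# playwright/__init__.py -- pdoc skips anything mapped to False here.
__pdoc__ = {
    "connection": False,
    "transport": False,
    "wait_helper": False,
}
```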
</issue>
<code>
[start of playwright/__init__.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import playwright.helper as helper
16 from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager
17
18 Error = helper.Error
19 TimeoutError = helper.TimeoutError
20
21
22 def async_playwright() -> AsyncPlaywrightContextManager:
23 return AsyncPlaywrightContextManager()
24
25
26 def sync_playwright() -> SyncPlaywrightContextManager:
27 return SyncPlaywrightContextManager()
28
29
30 __all__ = [
31 "async_playwright",
32 "sync_playwright",
33 "Error",
34 "TimeoutError",
35 ]
36
[end of playwright/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/playwright/__init__.py b/playwright/__init__.py
--- a/playwright/__init__.py
+++ b/playwright/__init__.py
@@ -11,6 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Python package `playwright` is a Python library to automate Chromium,
+Firefox and WebKit with a single API. Playwright is built to enable cross-browser
+web automation that is ever-green, capable, reliable and fast.
+For more information you'll find the documentation for the sync API [here](sync_api.html)
+and for the async API [here](async_api.html).
+"""
import playwright.helper as helper
from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager
@@ -33,3 +40,37 @@
"Error",
"TimeoutError",
]
+
+__pdoc__ = {
+ "accessibility": False,
+ "async_base": False,
+ "browser": False,
+ "browser_context": False,
+ "browser_type": False,
+ "cdp_session": False,
+ "chromium_browser_context": False,
+ "connection": False,
+ "console_message": False,
+ "dialog": False,
+ "download": False,
+ "element_handle": False,
+ "event_context_manager": False,
+ "file_chooser": False,
+ "frame": False,
+ "helper": False,
+ "impl_to_api_mapping": False,
+ "input": False,
+ "js_handle": False,
+ "main": False,
+ "network": False,
+ "object_factory": False,
+ "page": False,
+ "path_utils": False,
+ "playwright": False,
+ "selectors": False,
+ "sync_base": False,
+ "transport": False,
+ "wait_helper": False,
+ "async_playwright": False,
+ "sync_playwright": False,
+}
| {"golden_diff": "diff --git a/playwright/__init__.py b/playwright/__init__.py\n--- a/playwright/__init__.py\n+++ b/playwright/__init__.py\n@@ -11,6 +11,13 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\"\"\"\n+Python package `playwright` is a Python library to automate Chromium,\n+Firefox and WebKit with a single API. Playwright is built to enable cross-browser\n+web automation that is ever-green, capable, reliable and fast.\n+For more information you'll find the documentation for the sync API [here](sync_api.html)\n+and for the async API [here](async_api.html).\n+\"\"\"\n \n import playwright.helper as helper\n from playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager\n@@ -33,3 +40,37 @@\n \"Error\",\n \"TimeoutError\",\n ]\n+\n+__pdoc__ = {\n+ \"accessibility\": False,\n+ \"async_base\": False,\n+ \"browser\": False,\n+ \"browser_context\": False,\n+ \"browser_type\": False,\n+ \"cdp_session\": False,\n+ \"chromium_browser_context\": False,\n+ \"connection\": False,\n+ \"console_message\": False,\n+ \"dialog\": False,\n+ \"download\": False,\n+ \"element_handle\": False,\n+ \"event_context_manager\": False,\n+ \"file_chooser\": False,\n+ \"frame\": False,\n+ \"helper\": False,\n+ \"impl_to_api_mapping\": False,\n+ \"input\": False,\n+ \"js_handle\": False,\n+ \"main\": False,\n+ \"network\": False,\n+ \"object_factory\": False,\n+ \"page\": False,\n+ \"path_utils\": False,\n+ \"playwright\": False,\n+ \"selectors\": False,\n+ \"sync_base\": False,\n+ \"transport\": False,\n+ \"wait_helper\": False,\n+ \"async_playwright\": False,\n+ \"sync_playwright\": False,\n+}\n", "issue": "Internal docs are published on the gh pages instead of the official API\nWe should remove everything that is not relevant to the end user!\n\nDocs: https://microsoft.github.io/playwright-python/sync_api.html\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport playwright.helper as helper\nfrom playwright.main import AsyncPlaywrightContextManager, SyncPlaywrightContextManager\n\nError = helper.Error\nTimeoutError = helper.TimeoutError\n\n\ndef async_playwright() -> AsyncPlaywrightContextManager:\n return AsyncPlaywrightContextManager()\n\n\ndef sync_playwright() -> SyncPlaywrightContextManager:\n return SyncPlaywrightContextManager()\n\n\n__all__ = [\n \"async_playwright\",\n \"sync_playwright\",\n \"Error\",\n \"TimeoutError\",\n]\n", "path": "playwright/__init__.py"}]} | 878 | 468 |
gh_patches_debug_6610 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider scooters_coffee is broken
During the global build at 2021-06-23-14-42-18, spider **scooters_coffee** failed with **324 features** and **1 error**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/scooters_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson))
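The issue body does not show the failing input, but the spider's `parse_hours` parses times with `datetime.strptime(..., '%I:%M%p')`, which breaks as soon as the scraped hours contain spaces. A short sketch of normalising the value first (the sample string is hypothetical):
```python
from datetime import datetime

open_close = "6:30AM - 8:00PM"  # hypothetical scraped value; note the embedded spaces
open_close = open_close.replace(" ", "")
open_time, close_time = open_close.split("-")
print(datetime.strptime(open_time, "%I:%M%p").time())  # -> 06:30:00
```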
</issue>
<code>
[start of locations/spiders/scooters_coffee.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Mon': 'Mo', 'Tue': 'Tu',
11 'Wed': 'We', 'Thu': 'Th',
12 'Fri': 'Fr', 'Sat': 'Sa',
13 'Sun': 'Su'}
14
15
16 class ScootersCoffeeSpider(scrapy.Spider):
17 name = "scooters_coffee"
18 item_attributes = {'brand': "Scooter's Coffee"}
19 allowed_domains = ['code.metalocator.com']
20 download_delay = 0.5
21
22 def start_requests(self):
23 n = 327
24 for store_id in range(1, n+1):
25 url = f'https://code.metalocator.com/index.php?option=com_locator&view=location&tmpl=component&task=load&framed=1&sample_data=undefined&format=json&Itemid=12991&templ[]=item_address_template&lang=&_opt_out=&_urlparams=&distance=NaN&id={store_id}'
26
27 yield scrapy.Request(url=url, callback=self.parse)
28
29 def parse_hours(self, hours):
30 opening_hours = OpeningHours()
31
32 weekdays = re.findall(r'{(.*?)}', hours)
33 for weekday in weekdays:
34 day, open_close = weekday.split('|')
35 if open_close == 'C':
36 continue
37 else:
38 open_time, close_time = open_close.split('-')
39 opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')
40
41 return opening_hours.as_opening_hours()
42
43 def parse(self, response):
44 store_data = json.loads(response.text)[0]
45 name = store_data['name']
46 if '*permanently closed' in name.lower():
47 pass
48 else: # Gather the store details
49
50 properties = {
51 'ref': store_data['id'],
52 'name': store_data['name'].strip(' *COMING SOON'),
53 'addr_full': store_data['address'],
54 'city': store_data['city'],
55 'state': store_data['state'],
56 'postcode': store_data['postalcode'],
57 'country': store_data['country'],
58 'lat': store_data['lat'],
59 'lon': store_data['lng'],
60 'phone': store_data['phone'],
61 'website': response.url
62 }
63
64 hours = store_data.get('hours', '')
65 if hours and hours != '{Sun|C}{Mon|C}{Tue|C}{Wed|C}{Thu|C}{Fri|C}{Sat|C}':
66 store_hours = self.parse_hours(hours)
67 properties["opening_hours"] = store_hours
68
69 yield GeojsonPointItem(**properties)
70
[end of locations/spiders/scooters_coffee.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/scooters_coffee.py b/locations/spiders/scooters_coffee.py
--- a/locations/spiders/scooters_coffee.py
+++ b/locations/spiders/scooters_coffee.py
@@ -35,6 +35,7 @@
if open_close == 'C':
continue
else:
+ open_close = open_close.replace(' ', '')
open_time, close_time = open_close.split('-')
opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')
| {"golden_diff": "diff --git a/locations/spiders/scooters_coffee.py b/locations/spiders/scooters_coffee.py\n--- a/locations/spiders/scooters_coffee.py\n+++ b/locations/spiders/scooters_coffee.py\n@@ -35,6 +35,7 @@\n if open_close == 'C':\n continue\n else:\n+ open_close = open_close.replace(' ', '')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')\n", "issue": "Spider scooters_coffee is broken\nDuring the global build at 2021-06-23-14-42-18, spider **scooters_coffee** failed with **324 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/scooters_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/scooters_coffee.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\n\nclass ScootersCoffeeSpider(scrapy.Spider):\n name = \"scooters_coffee\"\n item_attributes = {'brand': \"Scooter's Coffee\"}\n allowed_domains = ['code.metalocator.com']\n download_delay = 0.5\n\n def start_requests(self):\n n = 327\n for store_id in range(1, n+1):\n url = f'https://code.metalocator.com/index.php?option=com_locator&view=location&tmpl=component&task=load&framed=1&sample_data=undefined&format=json&Itemid=12991&templ[]=item_address_template&lang=&_opt_out=&_urlparams=&distance=NaN&id={store_id}'\n \n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n weekdays = re.findall(r'{(.*?)}', hours)\n for weekday in weekdays:\n day, open_close = weekday.split('|')\n if open_close == 'C':\n continue\n else:\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time, time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n store_data = json.loads(response.text)[0]\n name = store_data['name']\n if '*permanently closed' in name.lower():\n pass\n else: # Gather the store details\n\n properties = {\n 'ref': store_data['id'],\n 'name': store_data['name'].strip(' *COMING SOON'),\n 'addr_full': store_data['address'],\n 'city': store_data['city'],\n 'state': store_data['state'],\n 'postcode': store_data['postalcode'],\n 'country': store_data['country'],\n 'lat': store_data['lat'],\n 'lon': store_data['lng'],\n 'phone': store_data['phone'],\n 'website': response.url\n }\n\n hours = store_data.get('hours', '')\n if hours and hours != '{Sun|C}{Mon|C}{Tue|C}{Wed|C}{Thu|C}{Fri|C}{Sat|C}':\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/scooters_coffee.py"}]} | 1,480 | 133 |
gh_patches_debug_40591 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4802 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/inference/tensor_parallel/policies/llama.py]
1 from functools import partial
2
3 import torch
4 from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm
5
6 from colossalai.shardformer.layer import VocabParallelEmbedding1D
7 from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
8 # import colossalai
9 from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy
10
11 from ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward
12
13 try:
14 from colossalai.kernel.triton import rmsnorm_forward
15
16 HAS_TRITON_RMSNORM = True
17 except:
18 print("you should install triton from https://github.com/openai/triton")
19 HAS_TRITON_RMSNORM = False
20
21
22 def get_triton_rmsnorm_forward():
23 if HAS_TRITON_RMSNORM:
24
25 def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
26 return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)
27
28 return _triton_rmsnorm_forward
29 else:
30 return None
31
32
33 class LlamaModelInferPolicy(LlamaForCausalLMPolicy):
34 def __init__(self) -> None:
35 super().__init__()
36
37 def module_policy(self):
38 policy = super().module_policy()
39
40 if self.shard_config.inference_gptq:
41 from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear
42
43 decoder_attribute_replacement = {
44 "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
45 "self_attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
46 }
47 policy[LlamaDecoderLayer] = ModulePolicyDescription(
48 attribute_replacement=decoder_attribute_replacement,
49 sub_module_replacement=[
50 SubModuleReplacementDescription(
51 suffix="self_attn.q_proj",
52 target_module=ColCaiQuantLinear,
53 kwargs={'split_num': 1},
54 ),
55 SubModuleReplacementDescription(
56 suffix="self_attn.k_proj",
57 target_module=ColCaiQuantLinear,
58 kwargs={'split_num': 1},
59 ),
60 SubModuleReplacementDescription(
61 suffix="self_attn.v_proj",
62 target_module=ColCaiQuantLinear,
63 kwargs={'split_num': 1},
64 ),
65 SubModuleReplacementDescription(
66 suffix="self_attn.o_proj",
67 target_module=RowCaiQuantLinear,
68 kwargs={'split_num': 1},
69 ),
70 SubModuleReplacementDescription(
71 suffix="mlp.gate_proj",
72 target_module=ColCaiQuantLinear,
73 kwargs={'split_num': 1},
74 ),
75 SubModuleReplacementDescription(
76 suffix="mlp.up_proj",
77 target_module=ColCaiQuantLinear,
78 kwargs={'split_num': 1},
79 ),
80 SubModuleReplacementDescription(
81 suffix="mlp.down_proj",
82 target_module=RowCaiQuantLinear,
83 kwargs={'split_num': 1},
84 )
85 ],
86 )
87
88 self.shard_config._infer()
89
90 infer_forward = LlamaInferenceForwards.llama_model_forward
91 method_replacement = {"forward": partial(infer_forward)}
92 self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)
93
94 infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward
95 method_replacement = {"forward": partial(infer_forward)}
96 self.append_or_create_method_replacement(
97 description=method_replacement, policy=policy, target_key=LlamaDecoderLayer
98 )
99
100 infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward
101 method_replacement = {"forward": partial(infer_forward)}
102 self.append_or_create_method_replacement(
103 description=method_replacement, policy=policy, target_key=LlamaAttention
104 )
105
106 infer_forward = None
107 if HAS_TRITON_RMSNORM:
108 infer_forward = get_triton_rmsnorm_forward()
109 else:
110 # NOTE: adding rms_norm from cuda kernels caused precision issue, fix @tiandiao123
111 infer_forward = get_llama_vllm_rmsnorm_forward()
112
113 if infer_forward is not None:
114 method_replacement = {"forward": partial(infer_forward)}
115 self.append_or_create_method_replacement(
116 description=method_replacement, policy=policy, target_key=LlamaRMSNorm
117 )
118
119 return policy
120
[end of colossalai/inference/tensor_parallel/policies/llama.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/inference/tensor_parallel/policies/llama.py b/colossalai/inference/tensor_parallel/policies/llama.py
--- a/colossalai/inference/tensor_parallel/policies/llama.py
+++ b/colossalai/inference/tensor_parallel/policies/llama.py
@@ -3,8 +3,8 @@
import torch
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm
-from colossalai.shardformer.layer import VocabParallelEmbedding1D
-from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
+from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
+
# import colossalai
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy
@@ -50,38 +50,38 @@
SubModuleReplacementDescription(
suffix="self_attn.q_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.k_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.v_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="self_attn.o_proj",
target_module=RowCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.gate_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.up_proj",
target_module=ColCaiQuantLinear,
- kwargs={'split_num': 1},
+ kwargs={"split_num": 1},
),
SubModuleReplacementDescription(
suffix="mlp.down_proj",
target_module=RowCaiQuantLinear,
- kwargs={'split_num': 1},
- )
+ kwargs={"split_num": 1},
+ ),
],
)
| {"golden_diff": "diff --git a/colossalai/inference/tensor_parallel/policies/llama.py b/colossalai/inference/tensor_parallel/policies/llama.py\n--- a/colossalai/inference/tensor_parallel/policies/llama.py\n+++ b/colossalai/inference/tensor_parallel/policies/llama.py\n@@ -3,8 +3,8 @@\n import torch\n from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm\n \n-from colossalai.shardformer.layer import VocabParallelEmbedding1D\n-from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription\n+from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n+\n # import colossalai\n from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n \n@@ -50,38 +50,38 @@\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n- kwargs={'split_num': 1},\n+ kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n- kwargs={'split_num': 1},\n- )\n+ kwargs={\"split_num\": 1},\n+ ),\n ],\n )\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from functools import partial\n\nimport torch\nfrom transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm\n\nfrom colossalai.shardformer.layer import VocabParallelEmbedding1D\nfrom colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription\n# import colossalai\nfrom colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n\nfrom ..modeling.llama import LlamaInferenceForwards, get_llama_vllm_rmsnorm_forward\n\ntry:\n from colossalai.kernel.triton import rmsnorm_forward\n\n HAS_TRITON_RMSNORM = True\nexcept:\n print(\"you should install triton from https://github.com/openai/triton\")\n HAS_TRITON_RMSNORM = False\n\n\ndef get_triton_rmsnorm_forward():\n if HAS_TRITON_RMSNORM:\n\n def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):\n return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n\n return _triton_rmsnorm_forward\n else:\n return None\n\n\nclass LlamaModelInferPolicy(LlamaForCausalLMPolicy):\n def __init__(self) -> None:\n super().__init__()\n\n def module_policy(self):\n policy = super().module_policy()\n\n if self.shard_config.inference_gptq:\n from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear\n\n decoder_attribute_replacement = {\n \"self_attn.hidden_size\": 
self.model.config.hidden_size // self.shard_config.tensor_parallel_size,\n \"self_attn.num_heads\": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,\n }\n policy[LlamaDecoderLayer] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={'split_num': 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={'split_num': 1},\n )\n ],\n )\n\n self.shard_config._infer()\n\n infer_forward = LlamaInferenceForwards.llama_model_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n\n infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n )\n\n infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaAttention\n )\n\n infer_forward = None\n if HAS_TRITON_RMSNORM:\n infer_forward = get_triton_rmsnorm_forward()\n else:\n # NOTE: adding rms_norm from cuda kernels caused precision issue, fix @tiandiao123\n infer_forward = get_llama_vllm_rmsnorm_forward()\n\n if infer_forward is not None:\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaRMSNorm\n )\n\n return policy\n", "path": "colossalai/inference/tensor_parallel/policies/llama.py"}]} | 1,830 | 549 |
gh_patches_debug_47928 | rasdani/github-patches | git_diff | uccser__cs-unplugged-862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only prepend www for production website
It should not be used for the development website.
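One way to keep a single settings file for both deployments is to gate the flag on an environment variable; this is a sketch only, and the `DEPLOYMENT` variable name is an assumption:
```python
# production.py -- only force the www. prefix on the production deployment.
if env("DEPLOYMENT", default=None) == "prod":  # noqa: F405
    PREPEND_WWW = True
else:
    PREPEND_WWW = False
```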
</issue>
<code>
[start of csunplugged/config/settings/production.py]
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for production environment.
4
5 - Load secret values from environment variables.
6 - Set static URL to Google Cloud Storage Bucket.
7 """
8
9 from .base import * # noqa: F403
10
11
12 # SECRET CONFIGURATION
13 # ------------------------------------------------------------------------------
14 # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
15 # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
16 SECRET_KEY = env("DJANGO_SECRET_KEY") # noqa: F405
17
18 # SECURITY WARNING: App Engine"s security features ensure that it is safe to
19 # have ALLOWED_HOSTS = ["*"] when the app is deployed. If you deploy a Django
20 # app not on App Engine, make sure to set an appropriate host here.
21 # See https://docs.djangoproject.com/en/1.10/ref/settings/
22 ALLOWED_HOSTS = ["*"]
23
24 # URL Configuration
25 # ------------------------------------------------------------------------------
26 PREPEND_WWW = True
27
28 # DATABASE CONFIGURATION
29 # ----------------------------------------------------------------------------
30 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
31 DATABASES = {
32 "default": {
33 "ENGINE": "django.db.backends.postgresql",
34 "NAME": "csunplugged",
35 "USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
36 "PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
37 "HOST": "/cloudsql/" + env("GOOGLE_CLOUD_SQL_CONNECTION_NAME"), # noqa: F405
38 }
39 }
40 DATABASES["default"]["ATOMIC_REQUESTS"] = True
41
42 # Static files
43 STATIC_URL = "https://storage.googleapis.com/" + env("GOOGLE_CLOUD_STORAGE_BUCKET_NAME") + "/static/" # noqa: F405
44
45 # SECURITY CONFIGURATION
46 # ------------------------------------------------------------------------------
47 # See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
48 # and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
49
50 # set this to 60 seconds and then to 518400 when you can prove it works
51 SECURE_HSTS_SECONDS = 60
52 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
53 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True) # noqa: F405
54 SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True) # noqa: F405
55 SECURE_BROWSER_XSS_FILTER = True
56 SESSION_COOKIE_SECURE = True
57 SESSION_COOKIE_HTTPONLY = True
58 SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True) # noqa: F405
59 CSRF_COOKIE_SECURE = True
60 CSRF_COOKIE_HTTPONLY = True
61 X_FRAME_OPTIONS = "DENY"
62
[end of csunplugged/config/settings/production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py
--- a/csunplugged/config/settings/production.py
+++ b/csunplugged/config/settings/production.py
@@ -23,7 +23,10 @@
# URL Configuration
# ------------------------------------------------------------------------------
-PREPEND_WWW = True
+if env("DEPLOYMENT", default=None) == "prod": # noqa: F405
+ PREPEND_WWW = True
+else:
+ PREPEND_WWW = False
# DATABASE CONFIGURATION
# ----------------------------------------------------------------------------
| {"golden_diff": "diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py\n--- a/csunplugged/config/settings/production.py\n+++ b/csunplugged/config/settings/production.py\n@@ -23,7 +23,10 @@\n \n # URL Configuration\n # ------------------------------------------------------------------------------\n-PREPEND_WWW = True\n+if env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n+ PREPEND_WWW = True\n+else:\n+ PREPEND_WWW = False\n \n # DATABASE CONFIGURATION\n # ----------------------------------------------------------------------------\n", "issue": "Only prepend www for production website\nIt should not be used for development website.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nPREPEND_WWW = True\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "csunplugged/config/settings/production.py"}]} | 1,307 | 130 |
gh_patches_debug_6831 | rasdani/github-patches | git_diff | wagtail__wagtail-6104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in wagtail/core/middleware.py warning
The second line of the warning in `wagtail/core/middleware.py` should read:
` 'Please update your code to use Site.find_for_request(request) in place of request.site, '`
instead of:
` 'Please update your code to use Site.get_for_request(request) in place of request.site, '`
</issue>
<code>
[start of wagtail/core/middleware.py]
1 import warnings
2
3 from django.utils.deprecation import MiddlewareMixin
4 from wagtail.core.models import Site
5 from wagtail.utils.deprecation import RemovedInWagtail211Warning
6
7
8 warnings.warn(
9 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
10 'Please update your code to use Site.get_for_request(request) in place of request.site, '
11 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',
12 RemovedInWagtail211Warning
13 )
14
15
16 class SiteMiddleware(MiddlewareMixin):
17 def process_request(self, request):
18 """
19 Set request.site to contain the Site object responsible for handling this request,
20 according to hostname matching rules
21 """
22 try:
23 request.site = Site.find_for_request(request)
24 except Site.DoesNotExist:
25 request.site = None
26
[end of wagtail/core/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/core/middleware.py b/wagtail/core/middleware.py
--- a/wagtail/core/middleware.py
+++ b/wagtail/core/middleware.py
@@ -7,7 +7,7 @@
warnings.warn(
'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
- 'Please update your code to use Site.get_for_request(request) in place of request.site, '
+ 'Please update your code to use Site.find_for_request(request) in place of request.site, '
'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',
RemovedInWagtail211Warning
)
| {"golden_diff": "diff --git a/wagtail/core/middleware.py b/wagtail/core/middleware.py\n--- a/wagtail/core/middleware.py\n+++ b/wagtail/core/middleware.py\n@@ -7,7 +7,7 @@\n \n warnings.warn(\n 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '\n- 'Please update your code to use Site.get_for_request(request) in place of request.site, '\n+ 'Please update your code to use Site.find_for_request(request) in place of request.site, '\n 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',\n RemovedInWagtail211Warning\n )\n", "issue": "Typo in wagtail/core/middleware.py warning\nThe second line of the warning in `wagtail/core/middleware.py` should read:\r\n\r\n` 'Please update your code to use Site.find_for_request(request) in place of request.site, '`\r\n\r\ninstead of:\r\n\r\n` 'Please update your code to use Site.get_for_request(request) in place of request.site, '`\r\n\n", "before_files": [{"content": "import warnings\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom wagtail.core.models import Site\nfrom wagtail.utils.deprecation import RemovedInWagtail211Warning\n\n\nwarnings.warn(\n 'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '\n 'Please update your code to use Site.get_for_request(request) in place of request.site, '\n 'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARES',\n RemovedInWagtail211Warning\n)\n\n\nclass SiteMiddleware(MiddlewareMixin):\n def process_request(self, request):\n \"\"\"\n Set request.site to contain the Site object responsible for handling this request,\n according to hostname matching rules\n \"\"\"\n try:\n request.site = Site.find_for_request(request)\n except Site.DoesNotExist:\n request.site = None\n", "path": "wagtail/core/middleware.py"}]} | 839 | 146 |
gh_patches_debug_15848 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: date time issue
### I Have A Problem With:
The integration in general
### What's Your Problem
Used to work. No change in cfg.
At some point, after an upgrade…
Integration fails while starting. See log.
### Source (if relevant)
_No response_
### Logs
```Shell
Denne feilen stammer fra en tilpasset integrasjon.
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
Integration: waste_collection_schedule (documentation)
First occurred: 19:02:11 (1 occurrences)
Last logged: 19:02:11
fetch failed for source Stavanger Kommune: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py", line 63, in fetch date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/_strptime.py", line 568, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/_strptime.py", line 534, in _strptime julian = datetime_date(year, month, day).toordinal() - \ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ValueError: day is out of range for month
```
### Relevant Configuration
```YAML
name: stavanger_no
municipality: Stavanger
```
### Checklist Source Error
- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py]
1 from datetime import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Stavanger Kommune"
8 DESCRIPTION = "Source for Stavanger Kommune, Norway"
9 URL = "https://www.stavanger.kommune.no/"
10 TEST_CASES = {
11 "TestcaseI": {
12 "id": "57bf9d36-722e-400b-ae93-d80f8e354724",
13 "municipality": "Stavanger",
14 "gnumber": "57",
15 "bnumber": "922",
16 "snumber": "0",
17 },
18 }
19
20 ICON_MAP = {
21 "Restavfall": "mdi:trash-can",
22 "Papp/papir": "mdi:recycle",
23 "Bio": "mdi:leaf",
24 "Juletre": "mdi:pine-tree",
25 }
26
27
28 class Source:
29 def __init__(self, id, municipality, gnumber, bnumber, snumber):
30 self._id = id
31 self._municipality = municipality
32 self._gnumber = gnumber
33 self._bnumber = bnumber
34 self._snumber = snumber
35
36 def fetch(self):
37 url = "https://www.stavanger.kommune.no/renovasjon-og-miljo/tommekalender/finn-kalender/show"
38 headers = {"referer": "https://www.stavanger.kommune.no"}
39
40 params = {
41 "id": self._id,
42 "municipality": self._municipality,
43 "gnumber": self._gnumber,
44 "bnumber": self._bnumber,
45 "snumber": self._snumber,
46 }
47
48 r = requests.get(url, params=params, headers=headers)
49 r.raise_for_status()
50
51 soup = BeautifulSoup(r.text, "html.parser")
52
53 tag = soup.find_all("option")
54 year = tag[0].get("value").split("-")
55 year = year[1]
56
57 entries = []
58 for tag in soup.find_all("tr", {"class": "waste-calendar__item"}):
59 if tag.text.strip() == "Dato og dag\nAvfallstype":
60 continue
61
62 date = tag.text.strip().split(" - ")
63 date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date()
64
65 for img in tag.find_all("img"):
66 waste_type = img.get("title")
67 entries.append(
68 Collection(date, waste_type, icon=ICON_MAP.get(waste_type))
69 )
70
71 return entries
72
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py
@@ -51,14 +51,12 @@
soup = BeautifulSoup(r.text, "html.parser")
tag = soup.find_all("option")
- year = tag[0].get("value").split("-")
- year = year[1]
-
entries = []
for tag in soup.find_all("tr", {"class": "waste-calendar__item"}):
if tag.text.strip() == "Dato og dag\nAvfallstype":
continue
+ year = tag.parent.attrs["data-month"].split("-")[1]
date = tag.text.strip().split(" - ")
date = datetime.strptime(date[0] + "." + year, "%d.%m.%Y").date()
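The hunk above pairs with the traceback in the issue: the original source took a single year from the first `<option>` tag and glued it onto every day/month string, so an entry such as `29.02` combined with the wrong year makes `strptime` fail with "day is out of range for month". The snippet below is an illustrative sketch only (not part of the golden patch); the `data-month="MM-YYYY"` format is an assumption inferred from the `split("-")[1]` call in the diff.

```python
from datetime import datetime

# Reproduces the logged error: 2023 has no 29th of February.
try:
    datetime.strptime("29.02" + "." + "2023", "%d.%m.%Y")
except ValueError as err:
    print(err)  # day is out of range for month

# The patched code reads the year from the calendar row itself, so each
# date string is combined with the year it actually belongs to.
data_month = "02-2024"                     # assumed data-month attribute value
year = data_month.split("-")[1]
print(datetime.strptime("29.02" + "." + year, "%d.%m.%Y").date())  # 2024-02-29
```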
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\n@@ -51,14 +51,12 @@\n soup = BeautifulSoup(r.text, \"html.parser\")\n \n tag = soup.find_all(\"option\")\n- year = tag[0].get(\"value\").split(\"-\")\n- year = year[1]\n-\n entries = []\n for tag in soup.find_all(\"tr\", {\"class\": \"waste-calendar__item\"}):\n if tag.text.strip() == \"Dato og dag\\nAvfallstype\":\n continue\n \n+ year = tag.parent.attrs[\"data-month\"].split(\"-\")[1]\n date = tag.text.strip().split(\" - \")\n date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date()\n", "issue": "[Bug]: date time issue\n### I Have A Problem With:\n\nThe integration in general\n\n### What's Your Problem\n\nUsed to work. No change in cfg.\r\nAt some point, after an upgrade\u2026\r\nIntegration fails while starting. See log.\n\n### Source (if relevant)\n\n_No response_\n\n### Logs\n\n```Shell\nDenne feilen stammer fra en tilpasset integrasjon.\r\n\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 19:02:11 (1 occurrences)\r\nLast logged: 19:02:11\r\n\r\nfetch failed for source Stavanger Kommune: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py\", line 63, in fetch date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.11/_strptime.py\", line 568, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.11/_strptime.py\", line 534, in _strptime julian = datetime_date(year, month, day).toordinal() - \\ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ValueError: day is out of range for month\n```\n\n\n### Relevant Configuration\n\n```YAML\nname: stavanger_no\r\n\r\nmunicipality: Stavanger\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": 
[{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Stavanger Kommune\"\nDESCRIPTION = \"Source for Stavanger Kommune, Norway\"\nURL = \"https://www.stavanger.kommune.no/\"\nTEST_CASES = {\n \"TestcaseI\": {\n \"id\": \"57bf9d36-722e-400b-ae93-d80f8e354724\",\n \"municipality\": \"Stavanger\",\n \"gnumber\": \"57\",\n \"bnumber\": \"922\",\n \"snumber\": \"0\",\n },\n}\n\nICON_MAP = {\n \"Restavfall\": \"mdi:trash-can\",\n \"Papp/papir\": \"mdi:recycle\",\n \"Bio\": \"mdi:leaf\",\n \"Juletre\": \"mdi:pine-tree\",\n}\n\n\nclass Source:\n def __init__(self, id, municipality, gnumber, bnumber, snumber):\n self._id = id\n self._municipality = municipality\n self._gnumber = gnumber\n self._bnumber = bnumber\n self._snumber = snumber\n\n def fetch(self):\n url = \"https://www.stavanger.kommune.no/renovasjon-og-miljo/tommekalender/finn-kalender/show\"\n headers = {\"referer\": \"https://www.stavanger.kommune.no\"}\n\n params = {\n \"id\": self._id,\n \"municipality\": self._municipality,\n \"gnumber\": self._gnumber,\n \"bnumber\": self._bnumber,\n \"snumber\": self._snumber,\n }\n\n r = requests.get(url, params=params, headers=headers)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n tag = soup.find_all(\"option\")\n year = tag[0].get(\"value\").split(\"-\")\n year = year[1]\n\n entries = []\n for tag in soup.find_all(\"tr\", {\"class\": \"waste-calendar__item\"}):\n if tag.text.strip() == \"Dato og dag\\nAvfallstype\":\n continue\n\n date = tag.text.strip().split(\" - \")\n date = datetime.strptime(date[0] + \".\" + year, \"%d.%m.%Y\").date()\n\n for img in tag.find_all(\"img\"):\n waste_type = img.get(\"title\")\n entries.append(\n Collection(date, waste_type, icon=ICON_MAP.get(waste_type))\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stavanger_no.py"}]} | 1,889 | 231 |
gh_patches_debug_30563 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1176 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add axis labels to pyhf.contrib.viz.brazil.plot_results
# Description
We know that the axis labels for [`pyhf.contrib.viz.brazil.plot_results`](https://github.com/scikit-hep/pyhf/blob/28fdfe95a3a4846ba70a9a338b3f72a94eac1322/src/pyhf/contrib/viz/brazil.py#L5) are always going to be the same, so we should just add them on there as
```python
ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\mathrm{CL}_{s}$")
```
</issue>
<code>
[start of src/pyhf/contrib/viz/brazil.py]
1 """Brazil Band Plots."""
2 import numpy as np
3
4
5 def plot_results(ax, mutests, tests, test_size=0.05):
6 """Plot a series of hypothesis tests for various POI values."""
7 cls_obs = np.array([test[0] for test in tests]).flatten()
8 cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
9 ax.plot(mutests, cls_obs, c='black')
10 for idx, color in zip(range(5), 5 * ['black']):
11 ax.plot(
12 mutests, cls_exp[idx], c=color, linestyle='dotted' if idx != 2 else 'dashed'
13 )
14 ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')
15 ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')
16 ax.plot(mutests, [test_size] * len(mutests), c='red')
17 ax.set_ylim(0, 1)
18
[end of src/pyhf/contrib/viz/brazil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/contrib/viz/brazil.py b/src/pyhf/contrib/viz/brazil.py
--- a/src/pyhf/contrib/viz/brazil.py
+++ b/src/pyhf/contrib/viz/brazil.py
@@ -3,7 +3,37 @@
def plot_results(ax, mutests, tests, test_size=0.05):
- """Plot a series of hypothesis tests for various POI values."""
+ """
+ Plot a series of hypothesis tests for various POI values.
+
+ Example:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> import pyhf
+ >>> import pyhf.contrib.viz.brazil
+ >>> pyhf.set_backend("numpy")
+ >>> model = pyhf.simplemodels.hepdata_like(
+ ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
+ ... )
+ >>> observations = [51, 48]
+ >>> data = observations + model.config.auxdata
+ >>> poi_vals = np.linspace(0, 5, 41)
+ >>> results = [
+ ... pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)
+ ... for test_poi in poi_vals
+ ... ]
+ >>> fig, ax = plt.subplots()
+ >>> pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)
+
+ Args:
+ ax (`matplotlib.axes.Axes`): The matplotlib axis object to plot on.
+ mutests (:obj:`list` or :obj:`array`): The values of the POI where the
+ hypothesis tests were performed.
+ tests (:obj:`list` or :obj:`array`): The :math:$\\mathrm{CL}_{s}$ values
+ from the hypothesis tests.
+ test_size (:obj:`float`): The size, :math:$\alpha$, of the test.
+ """
cls_obs = np.array([test[0] for test in tests]).flatten()
cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]
ax.plot(mutests, cls_obs, c='black')
@@ -15,3 +45,6 @@
ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')
ax.plot(mutests, [test_size] * len(mutests), c='red')
ax.set_ylim(0, 1)
+
+ ax.set_xlabel(r"$\mu$ (POI)")
+ ax.set_ylabel(r"$\mathrm{CL}_{s}$")
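For reference, the caller-side effect of this change is that scripts no longer need to label the axes themselves after `plot_results`. The snippet below simply restates the docstring example from the diff as a standalone script; treat it as a sketch rather than an official pyhf example.

```python
import numpy as np
import matplotlib.pyplot as plt
import pyhf
import pyhf.contrib.viz.brazil

pyhf.set_backend("numpy")
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
)
data = [51, 48] + model.config.auxdata
poi_vals = np.linspace(0, 5, 41)
results = [
    pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)
    for test_poi in poi_vals
]

fig, ax = plt.subplots()
pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)
# No manual ax.set_xlabel / ax.set_ylabel calls are needed after the patch.
fig.savefig("brazil_band.png")
```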
| {"golden_diff": "diff --git a/src/pyhf/contrib/viz/brazil.py b/src/pyhf/contrib/viz/brazil.py\n--- a/src/pyhf/contrib/viz/brazil.py\n+++ b/src/pyhf/contrib/viz/brazil.py\n@@ -3,7 +3,37 @@\n \n \n def plot_results(ax, mutests, tests, test_size=0.05):\n- \"\"\"Plot a series of hypothesis tests for various POI values.\"\"\"\n+ \"\"\"\n+ Plot a series of hypothesis tests for various POI values.\n+\n+ Example:\n+\n+ >>> import numpy as np\n+ >>> import matplotlib.pyplot as plt\n+ >>> import pyhf\n+ >>> import pyhf.contrib.viz.brazil\n+ >>> pyhf.set_backend(\"numpy\")\n+ >>> model = pyhf.simplemodels.hepdata_like(\n+ ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n+ ... )\n+ >>> observations = [51, 48]\n+ >>> data = observations + model.config.auxdata\n+ >>> poi_vals = np.linspace(0, 5, 41)\n+ >>> results = [\n+ ... pyhf.infer.hypotest(test_poi, data, model, return_expected_set=True)\n+ ... for test_poi in poi_vals\n+ ... ]\n+ >>> fig, ax = plt.subplots()\n+ >>> pyhf.contrib.viz.brazil.plot_results(ax, poi_vals, results)\n+\n+ Args:\n+ ax (`matplotlib.axes.Axes`): The matplotlib axis object to plot on.\n+ mutests (:obj:`list` or :obj:`array`): The values of the POI where the\n+ hypothesis tests were performed.\n+ tests (:obj:`list` or :obj:`array`): The :math:$\\\\mathrm{CL}_{s}$ values\n+ from the hypothesis tests.\n+ test_size (:obj:`float`): The size, :math:$\\alpha$, of the test.\n+ \"\"\"\n cls_obs = np.array([test[0] for test in tests]).flatten()\n cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]\n ax.plot(mutests, cls_obs, c='black')\n@@ -15,3 +45,6 @@\n ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')\n ax.plot(mutests, [test_size] * len(mutests), c='red')\n ax.set_ylim(0, 1)\n+\n+ ax.set_xlabel(r\"$\\mu$ (POI)\")\n+ ax.set_ylabel(r\"$\\mathrm{CL}_{s}$\")\n", "issue": "Add axis labels to pyhf.contrib.viz.brazil.plot_results\n# Description\r\n\r\nWe know that the axis labels for [`pyhf.contrib.viz.brazil.plot_results`](https://github.com/scikit-hep/pyhf/blob/28fdfe95a3a4846ba70a9a338b3f72a94eac1322/src/pyhf/contrib/viz/brazil.py#L5) are always going to be the same, so we should just add them on there as\r\n\r\n```python\r\nax.set_xlabel(r\"$\\mu$\")\r\nax.set_ylabel(r\"$\\mathrm{CL}_{s}$\")\r\n```\n", "before_files": [{"content": "\"\"\"Brazil Band Plots.\"\"\"\nimport numpy as np\n\n\ndef plot_results(ax, mutests, tests, test_size=0.05):\n \"\"\"Plot a series of hypothesis tests for various POI values.\"\"\"\n cls_obs = np.array([test[0] for test in tests]).flatten()\n cls_exp = [np.array([test[1][i] for test in tests]).flatten() for i in range(5)]\n ax.plot(mutests, cls_obs, c='black')\n for idx, color in zip(range(5), 5 * ['black']):\n ax.plot(\n mutests, cls_exp[idx], c=color, linestyle='dotted' if idx != 2 else 'dashed'\n )\n ax.fill_between(mutests, cls_exp[0], cls_exp[-1], facecolor='yellow')\n ax.fill_between(mutests, cls_exp[1], cls_exp[-2], facecolor='green')\n ax.plot(mutests, [test_size] * len(mutests), c='red')\n ax.set_ylim(0, 1)\n", "path": "src/pyhf/contrib/viz/brazil.py"}]} | 941 | 619 |
gh_patches_debug_64571 | rasdani/github-patches | git_diff | cocotb__cocotb-1145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Packaging: Add python_requires to manifest
Define our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 from setuptools import setup
32 from setuptools import find_packages
33 from os import path, walk
34
35 def read_file(fname):
36 return open(path.join(path.dirname(__file__), fname)).read()
37
38 def package_files(directory):
39 paths = []
40 for (fpath, directories, filenames) in walk(directory):
41 for filename in filenames:
42 paths.append(path.join('..', fpath, filename))
43 return paths
44
45 version = read_file('version')[8:].strip()
46
47 setup(
48 name='cocotb',
49 version=version,
50 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
51 url='https://github.com/potentialventures/cocotb',
52 license='BSD',
53 long_description=read_file('README.md'),
54 long_description_content_type='text/markdown',
55 author='Chris Higgs, Stuart Hodgson',
56 author_email='[email protected]',
57 install_requires=[],
58 packages=find_packages(),
59 include_package_data=True,
60 package_data={'cocotb': package_files('cocotb/share')},
61 entry_points={
62 'console_scripts': [
63 'cocotb-config=cocotb.config:main',
64 ]
65 },
66 platforms='any',
67 classifiers=[
68 "Programming Language :: Python :: 2.7",
69 "Programming Language :: Python :: 3",
70 "License :: OSI Approved :: BSD License",
71 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
72 ],
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -55,6 +55,7 @@
author='Chris Higgs, Stuart Hodgson',
author_email='[email protected]',
install_requires=[],
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
packages=find_packages(),
include_package_data=True,
package_data={'cocotb': package_files('cocotb/share')},
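As a quick illustration of what the new argument buys: `python_requires` is written into the distribution's `Requires-Python` metadata, so pip refuses to install the package on an unsupported interpreter instead of letting it fail at runtime. The sketch below is illustrative only; the package name is hypothetical, and only the version specifier is taken from the patch.

```python
from setuptools import setup, find_packages

setup(
    name="example-package",  # hypothetical stand-in, not cocotb itself
    version="0.1.0",
    packages=find_packages(),
    # Same specifier as the patch: Python 2.7 or 3.5+, excluding 3.0-3.4.
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
)
```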
| {"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n", "issue": "Packaging: Add python_requires to manifest\nDefine our Python version requirements in our package manifest, as described here: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\n\ndef read_file(fname):\n return open(path.join(path.dirname(__file__), fname)).read()\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\nversion = read_file('version')[8:].strip()\n\nsetup(\n name='cocotb',\n version=version,\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://github.com/potentialventures/cocotb',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n author_email='[email protected]',\n install_requires=[],\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n)\n", "path": "setup.py"}]} | 1,385 | 146 |
gh_patches_debug_24079 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
improvement to insertion_sort algorithm
### Feature description
I was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.
These are the things that need improvements:
1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.
We can either use "indexes" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?
2. I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?
3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.
</issue>
<code>
[start of sorts/insertion_sort.py]
1 """
2 A pure Python implementation of the insertion sort algorithm
3
4 This algorithm sorts a collection by comparing adjacent elements.
5 When it finds that order is not respected, it moves the element compared
6 backward until the order is correct. It then goes back directly to the
7 element's initial position resuming forward comparison.
8
9 For doctests run following command:
10 python3 -m doctest -v insertion_sort.py
11
12 For manual testing run:
13 python3 insertion_sort.py
14 """
15
16
17 def insertion_sort(collection: list) -> list:
18 """A pure Python implementation of the insertion sort algorithm
19
20 :param collection: some mutable ordered collection with heterogeneous
21 comparable items inside
22 :return: the same collection ordered by ascending
23
24 Examples:
25 >>> insertion_sort([0, 5, 3, 2, 2])
26 [0, 2, 2, 3, 5]
27 >>> insertion_sort([]) == sorted([])
28 True
29 >>> insertion_sort([-2, -5, -45]) == sorted([-2, -5, -45])
30 True
31 >>> insertion_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])
32 True
33 >>> import random
34 >>> collection = random.sample(range(-50, 50), 100)
35 >>> insertion_sort(collection) == sorted(collection)
36 True
37 >>> import string
38 >>> collection = random.choices(string.ascii_letters + string.digits, k=100)
39 >>> insertion_sort(collection) == sorted(collection)
40 True
41 """
42
43 for insert_index, insert_value in enumerate(collection[1:]):
44 temp_index = insert_index
45 while insert_index >= 0 and insert_value < collection[insert_index]:
46 collection[insert_index + 1] = collection[insert_index]
47 insert_index -= 1
48 if insert_index != temp_index:
49 collection[insert_index + 1] = insert_value
50 return collection
51
52
53 if __name__ == "__main__":
54 from doctest import testmod
55
56 testmod()
57
58 user_input = input("Enter numbers separated by a comma:\n").strip()
59 unsorted = [int(item) for item in user_input.split(",")]
60 print(f"{insertion_sort(unsorted) = }")
61
[end of sorts/insertion_sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py
--- a/sorts/insertion_sort.py
+++ b/sorts/insertion_sort.py
@@ -13,8 +13,19 @@
python3 insertion_sort.py
"""
+from collections.abc import MutableSequence
+from typing import Any, Protocol, TypeVar
-def insertion_sort(collection: list) -> list:
+
+class Comparable(Protocol):
+ def __lt__(self, other: Any, /) -> bool:
+ ...
+
+
+T = TypeVar("T", bound=Comparable)
+
+
+def insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]:
"""A pure Python implementation of the insertion sort algorithm
:param collection: some mutable ordered collection with heterogeneous
@@ -40,13 +51,12 @@
True
"""
- for insert_index, insert_value in enumerate(collection[1:]):
- temp_index = insert_index
- while insert_index >= 0 and insert_value < collection[insert_index]:
- collection[insert_index + 1] = collection[insert_index]
+ for insert_index in range(1, len(collection)):
+ insert_value = collection[insert_index]
+ while insert_index > 0 and insert_value < collection[insert_index - 1]:
+ collection[insert_index] = collection[insert_index - 1]
insert_index -= 1
- if insert_index != temp_index:
- collection[insert_index + 1] = insert_value
+ collection[insert_index] = insert_value
return collection
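A standalone sketch of the same index-based rewrite (points 1 and 3 of the issue), with a small check that in-place mutation still returns the sorted list. It mirrors the loop in the diff but is illustrative, not the repository code.

```python
def insertion_sort_sketch(collection: list) -> list:
    """Index-based insertion sort: no collection[1:] copy, no temp_index check."""
    for insert_index in range(1, len(collection)):
        insert_value = collection[insert_index]
        # Shift larger elements one slot to the right.
        while insert_index > 0 and insert_value < collection[insert_index - 1]:
            collection[insert_index] = collection[insert_index - 1]
            insert_index -= 1
        collection[insert_index] = insert_value
    return collection


data = [5, 2, 4, 1, 3]
assert insertion_sort_sketch(data) is data  # sorted in place, same object returned
assert data == [1, 2, 3, 4, 5]
```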
| {"golden_diff": "diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py\n--- a/sorts/insertion_sort.py\n+++ b/sorts/insertion_sort.py\n@@ -13,8 +13,19 @@\n python3 insertion_sort.py\n \"\"\"\n \n+from collections.abc import MutableSequence\n+from typing import Any, Protocol, TypeVar\n \n-def insertion_sort(collection: list) -> list:\n+\n+class Comparable(Protocol):\n+ def __lt__(self, other: Any, /) -> bool:\n+ ...\n+\n+\n+T = TypeVar(\"T\", bound=Comparable)\n+\n+\n+def insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]:\n \"\"\"A pure Python implementation of the insertion sort algorithm\n \n :param collection: some mutable ordered collection with heterogeneous\n@@ -40,13 +51,12 @@\n True\n \"\"\"\n \n- for insert_index, insert_value in enumerate(collection[1:]):\n- temp_index = insert_index\n- while insert_index >= 0 and insert_value < collection[insert_index]:\n- collection[insert_index + 1] = collection[insert_index]\n+ for insert_index in range(1, len(collection)):\n+ insert_value = collection[insert_index]\n+ while insert_index > 0 and insert_value < collection[insert_index - 1]:\n+ collection[insert_index] = collection[insert_index - 1]\n insert_index -= 1\n- if insert_index != temp_index:\n- collection[insert_index + 1] = insert_value\n+ collection[insert_index] = insert_value\n return collection\n", "issue": "improvement to insertion_sort algorithm\n### Feature description\r\n\r\nI was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.\r\n\r\nThese are the things that need improvements:\r\n\r\n1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.\r\n\r\n We can either use \"indexes\" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?\r\n\r\n2. I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?\r\n\r\n3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.\nimprovement to insertion_sort algorithm\n### Feature description\r\n\r\nI was about to make a PR to improve the implementation of [insertion_sort algorithm](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) but since there might be multiple ways of doing so, I thought I should first ask your opinions.\r\n\r\nThese are the things that need improvements:\r\n\r\n1. We unnecessarily create a whole new copy of the list: `enumerate(collection[1:])`.\r\n\r\n We can either use \"indexes\" to avoid this which is not very pythonic, or we can use the iterator of the list using `iter()` and throw away the first item using `next()`. In second case we have to either check for empty list first or wrap it in a try-except block. I'll go with indexes if you ask. What do you think?\r\n\r\n2. 
I think a function should either mutate the list in-place and returns `None`, or it should create new sorted list without modifying the original list. Mutating the list and returning the mutated list is not what most developers expect to see. What do you think?\r\n\r\n3. We can safely remove `if insert_index != temp_index:` condition and unindent its body. Assigning an item to an index of a list is not costly. So it's one less line in general.\n", "before_files": [{"content": "\"\"\"\nA pure Python implementation of the insertion sort algorithm\n\nThis algorithm sorts a collection by comparing adjacent elements.\nWhen it finds that order is not respected, it moves the element compared\nbackward until the order is correct. It then goes back directly to the\nelement's initial position resuming forward comparison.\n\nFor doctests run following command:\npython3 -m doctest -v insertion_sort.py\n\nFor manual testing run:\npython3 insertion_sort.py\n\"\"\"\n\n\ndef insertion_sort(collection: list) -> list:\n \"\"\"A pure Python implementation of the insertion sort algorithm\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n Examples:\n >>> insertion_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n >>> insertion_sort([]) == sorted([])\n True\n >>> insertion_sort([-2, -5, -45]) == sorted([-2, -5, -45])\n True\n >>> insertion_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])\n True\n >>> import random\n >>> collection = random.sample(range(-50, 50), 100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n >>> import string\n >>> collection = random.choices(string.ascii_letters + string.digits, k=100)\n >>> insertion_sort(collection) == sorted(collection)\n True\n \"\"\"\n\n for insert_index, insert_value in enumerate(collection[1:]):\n temp_index = insert_index\n while insert_index >= 0 and insert_value < collection[insert_index]:\n collection[insert_index + 1] = collection[insert_index]\n insert_index -= 1\n if insert_index != temp_index:\n collection[insert_index + 1] = insert_value\n return collection\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\n unsorted = [int(item) for item in user_input.split(\",\")]\n print(f\"{insertion_sort(unsorted) = }\")\n", "path": "sorts/insertion_sort.py"}]} | 1,704 | 354 |
gh_patches_debug_893 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-665 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageEmbedder default behavior is not a flattened output
## 🐛 Bug
I discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor.
My understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html), is that each embedding is expected to be 1D.
The reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X.
### To Reproduce
Steps to reproduce the behavior:
Run the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example.
Note: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder.
#### Code sample
```python
from flash.core.data.utils import download_data
from flash.image import ImageEmbedder
# 1. Download the data
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
# 2. Create an ImageEmbedder with resnet50 trained on imagenet.
embedder = ImageEmbedder(backbone="resnet50")
# 3. Generate an embedding from an image path.
embeddings = embedder.predict("data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg")
# 4. Print embeddings shape
print(embeddings.shape)
```
### Expected behavior
Expect to see a 100352x1 shape tensor as the output, instead of 2048x7x7.
### Environment
- PyTorch Version (e.g., 1.0): 1.9
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, source): pip
- Build command you used (if compiling from source): N/A
- Python version: 3.8.6
- CUDA/cuDNN version: N/A
- GPU models and configuration: N/A
- Any other relevant information: N/A
### Additional context
I believe the question is around what the logic should be here:
https://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92
If `embedding_dim` is None, then the head is `nn.Identity()`. **If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?**
It could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with "[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)"
Let me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR.
</issue>
<code>
[start of flash_examples/integrations/fiftyone/image_embedding.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import fiftyone as fo
15 import fiftyone.brain as fob
16 import numpy as np
17
18 from flash.core.data.utils import download_data
19 from flash.image import ImageEmbedder
20
21 # 1 Download data
22 download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip")
23
24 # 2 Load data into FiftyOne
25 dataset = fo.Dataset.from_dir(
26 "data/hymenoptera_data/test/",
27 fo.types.ImageClassificationDirectoryTree,
28 )
29
30 # 3 Load model
31 embedder = ImageEmbedder(backbone="resnet101", embedding_dim=128)
32
33 # 4 Generate embeddings
34 filepaths = dataset.values("filepath")
35 embeddings = np.stack(embedder.predict(filepaths))
36
37 # 5 Visualize in FiftyOne App
38 results = fob.compute_visualization(dataset, embeddings=embeddings)
39 session = fo.launch_app(dataset)
40 plot = results.visualize(labels="ground_truth.label")
41 plot.show()
42
43 # Optional: block execution until App is closed
44 session.wait()
45
[end of flash_examples/integrations/fiftyone/image_embedding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash_examples/integrations/fiftyone/image_embedding.py b/flash_examples/integrations/fiftyone/image_embedding.py
--- a/flash_examples/integrations/fiftyone/image_embedding.py
+++ b/flash_examples/integrations/fiftyone/image_embedding.py
@@ -28,7 +28,7 @@
)
# 3 Load model
-embedder = ImageEmbedder(backbone="resnet101", embedding_dim=128)
+embedder = ImageEmbedder(backbone="resnet101")
# 4 Generate embeddings
filepaths = dataset.values("filepath")
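The shape question raised in the issue is easy to see with plain tensors. A minimal sketch follows (random data, no real backbone), assuming the 2048x7x7 feature-map size quoted for resnet101:

```python
import torch
from torch import nn

features = torch.randn(1, 2048, 7, 7)  # per-image shape quoted for resnet101

identity_head = nn.Identity()
flatten_head = nn.Flatten()

print(identity_head(features).shape)  # torch.Size([1, 2048, 7, 7]) -> 3D per image
print(flatten_head(features).shape)   # torch.Size([1, 100352])     -> flat vector
```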
| {"golden_diff": "diff --git a/flash_examples/integrations/fiftyone/image_embedding.py b/flash_examples/integrations/fiftyone/image_embedding.py\n--- a/flash_examples/integrations/fiftyone/image_embedding.py\n+++ b/flash_examples/integrations/fiftyone/image_embedding.py\n@@ -28,7 +28,7 @@\n )\n \n # 3 Load model\n-embedder = ImageEmbedder(backbone=\"resnet101\", embedding_dim=128)\n+embedder = ImageEmbedder(backbone=\"resnet101\")\n \n # 4 Generate embeddings\n filepaths = dataset.values(\"filepath\")\n", "issue": "ImageEmbedder default behavior is not a flattened output\n## \ud83d\udc1b Bug\r\n\r\nI discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor. \r\nMy understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html) is they expect the embeddings to be 1D (for each embedding). \r\n\r\nThe reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X.\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nRun the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example.\r\n\r\nNote: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder. \r\n\r\n\r\n#### Code sample\r\n```python\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.image import ImageEmbedder\r\n\r\n# 1. Download the data\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n\r\n# 2. Create an ImageEmbedder with resnet50 trained on imagenet.\r\nembedder = ImageEmbedder(backbone=\"resnet50\")\r\n\r\n# 3. Generate an embedding from an image path.\r\nembeddings = embedder.predict(\"data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg\")\r\n\r\n# 4. Print embeddings shape\r\nprint(embeddings.shape)\r\n```\r\n\r\n### Expected behavior\r\n\r\nExpect to see a 100352x1 shape tensor as the output, instead of 2048x7x7. \r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0): 1.9\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source): N/A\r\n - Python version: 3.8.6\r\n - CUDA/cuDNN version: N/A\r\n - GPU models and configuration: N/A\r\n - Any other relevant information: N/A\r\n\r\n### Additional context\r\n\r\nI believe the question is around what the logic should be here:\r\nhttps://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92\r\n\r\nIf `embedding_dim` is None, then the head is `nn.Identity()`. 
**If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?**\r\n\r\nIt could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with \"[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)\"\r\n\r\nLet me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR. \r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fiftyone as fo\nimport fiftyone.brain as fob\nimport numpy as np\n\nfrom flash.core.data.utils import download_data\nfrom flash.image import ImageEmbedder\n\n# 1 Download data\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\")\n\n# 2 Load data into FiftyOne\ndataset = fo.Dataset.from_dir(\n \"data/hymenoptera_data/test/\",\n fo.types.ImageClassificationDirectoryTree,\n)\n\n# 3 Load model\nembedder = ImageEmbedder(backbone=\"resnet101\", embedding_dim=128)\n\n# 4 Generate embeddings\nfilepaths = dataset.values(\"filepath\")\nembeddings = np.stack(embedder.predict(filepaths))\n\n# 5 Visualize in FiftyOne App\nresults = fob.compute_visualization(dataset, embeddings=embeddings)\nsession = fo.launch_app(dataset)\nplot = results.visualize(labels=\"ground_truth.label\")\nplot.show()\n\n# Optional: block execution until App is closed\nsession.wait()\n", "path": "flash_examples/integrations/fiftyone/image_embedding.py"}]} | 1,820 | 135 |
gh_patches_debug_15994 | rasdani/github-patches | git_diff | falconry__falcon-1588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WebDAV methods not supported
Falcon defines supported HTTP methods in `falcon/constants.py`: supported are "usual" `HTTP_METHODS` and, in addition to that, `WEBDAV_METHODS`. However, only WebDAV versioning extension methods from RFC 3253 are supported, but not the "ordinary" WebDAV ones (i.e. from RFCs 2518 & 4918) like `COPY`, `LOCK`, `MKCOL`, `MOVE` etc.
Supporting only an extension, but not the core upon which that extension builds looks somewhat inconsistent.
</issue>
<code>
[start of falcon/constants.py]
1 import os
2
3 # RFC 7231, 5789 methods
4 HTTP_METHODS = [
5 'CONNECT',
6 'DELETE',
7 'GET',
8 'HEAD',
9 'OPTIONS',
10 'PATCH',
11 'POST',
12 'PUT',
13 'TRACE',
14 ]
15
16 # RFC 3253 methods
17 WEBDAV_METHODS = [
18 'CHECKIN',
19 'CHECKOUT',
20 'REPORT',
21 'UNCHECKIN',
22 'UPDATE',
23 'VERSION-CONTROL',
24 ]
25
26 # if FALCON_CUSTOM_HTTP_METHODS is defined, treat it as a comma-
27 # delimited string of additional supported methods in this env.
28 FALCON_CUSTOM_HTTP_METHODS = [
29 method.strip().upper()
30 for method in os.environ.get('FALCON_CUSTOM_HTTP_METHODS', '').split(',')
31 if method.strip() != ''
32 ]
33
34 COMBINED_METHODS = HTTP_METHODS + WEBDAV_METHODS + FALCON_CUSTOM_HTTP_METHODS
35
36 # NOTE(kgriffs): According to RFC 7159, most JSON parsers assume
37 # UTF-8 and so it is the recommended default charset going forward,
38 # and indeed, other charsets should not be specified to ensure
39 # maximum interoperability.
40 MEDIA_JSON = 'application/json'
41
42 # NOTE(kgriffs): An internet media type for MessagePack has not
43 # yet been registered. 'application/x-msgpack' is commonly used,
44 # but the use of the 'x-' prefix is discouraged by RFC 6838.
45 MEDIA_MSGPACK = 'application/msgpack'
46
47 # NOTE(kgriffs): An internet media type for YAML has not been
48 # registered. RoR uses 'application/x-yaml', but since use of
49 # 'x-' is discouraged by RFC 6838, we don't use it in Falcon.
50 #
51 # The YAML specification requires that parsers deduce the character
52 # encoding by examining the first few bytes of the document itself.
53 # Therefore, it does not make sense to include the charset in the
54 # media type string.
55 MEDIA_YAML = 'application/yaml'
56
57 # NOTE(kgriffs): According to RFC 7303, when the charset is
58 # omitted, preference is given to the encoding specified in the
59 # document itself (either via a BOM, or via the XML declaration). If
60 # the document does not explicitly specify the encoding, UTF-8 is
61 # assumed. We do not specify the charset here, because many parsers
62 # ignore it anyway and just use what is specified in the document,
63 # contrary to the RFCs.
64 MEDIA_XML = 'application/xml'
65
66
67 # NOTE(kgriffs): RFC 4329 recommends application/* over text/.
68 # futhermore, parsers are required to respect the Unicode
69 # encoding signature, if present in the document, and to default
70 # to UTF-8 when not present. Note, however, that implementations
71 # are not required to support anything besides UTF-8, so it is
72 # unclear how much utility an encoding signature (or the charset
73 # parameter for that matter) has in practice.
74 MEDIA_JS = 'application/javascript'
75
76 # NOTE(kgriffs): According to RFC 6838, most text media types should
77 # include the charset parameter.
78 MEDIA_HTML = 'text/html; charset=utf-8'
79 MEDIA_TEXT = 'text/plain; charset=utf-8'
80
81 MEDIA_JPEG = 'image/jpeg'
82 MEDIA_PNG = 'image/png'
83 MEDIA_GIF = 'image/gif'
84
85 DEFAULT_MEDIA_TYPE = MEDIA_JSON
86
[end of falcon/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/constants.py b/falcon/constants.py
--- a/falcon/constants.py
+++ b/falcon/constants.py
@@ -13,12 +13,19 @@
'TRACE',
]
-# RFC 3253 methods
+# RFC 2518 and 4918 methods
WEBDAV_METHODS = [
'CHECKIN',
'CHECKOUT',
+ 'COPY',
+ 'LOCK',
+ 'MKCOL',
+ 'MOVE',
+ 'PROPFIND',
+ 'PROPPATCH',
'REPORT',
'UNCHECKIN',
+ 'UNLOCK'
'UPDATE',
'VERSION-CONTROL',
]
@@ -63,7 +70,6 @@
# contrary to the RFCs.
MEDIA_XML = 'application/xml'
-
# NOTE(kgriffs): RFC 4329 recommends application/* over text/.
# futhermore, parsers are required to respect the Unicode
# encoding signature, if present in the document, and to default
| {"golden_diff": "diff --git a/falcon/constants.py b/falcon/constants.py\n--- a/falcon/constants.py\n+++ b/falcon/constants.py\n@@ -13,12 +13,19 @@\n 'TRACE',\n ]\n \n-# RFC 3253 methods\n+# RFC 2518 and 4918 methods\n WEBDAV_METHODS = [\n 'CHECKIN',\n 'CHECKOUT',\n+ 'COPY',\n+ 'LOCK',\n+ 'MKCOL',\n+ 'MOVE',\n+ 'PROPFIND',\n+ 'PROPPATCH',\n 'REPORT',\n 'UNCHECKIN',\n+ 'UNLOCK'\n 'UPDATE',\n 'VERSION-CONTROL',\n ]\n@@ -63,7 +70,6 @@\n # contrary to the RFCs.\n MEDIA_XML = 'application/xml'\n \n-\n # NOTE(kgriffs): RFC 4329 recommends application/* over text/.\n # futhermore, parsers are required to respect the Unicode\n # encoding signature, if present in the document, and to default\n", "issue": "WebDAV methods not supported\nFalcon defines supported HTTP methods in `falcon/constants.py`: supported are \"usual\" `HTTP_METHODS` and, in addition to that, `WEBDAV_METHODS`. However, only WebDAV versioning extension methods from RFC 3253 are supported, but not the \"ordinary\" WebDAV ones (i.e. from RFCs 2518 & 4918) like `COPY`, `LOCK`, `MKCOL`, `MOVE` etc.\r\n\r\nSupporting only an extension, but not the core upon which that extension builds looks somewhat inconsistent.\n", "before_files": [{"content": "import os\n\n# RFC 7231, 5789 methods\nHTTP_METHODS = [\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n]\n\n# RFC 3253 methods\nWEBDAV_METHODS = [\n 'CHECKIN',\n 'CHECKOUT',\n 'REPORT',\n 'UNCHECKIN',\n 'UPDATE',\n 'VERSION-CONTROL',\n]\n\n# if FALCON_CUSTOM_HTTP_METHODS is defined, treat it as a comma-\n# delimited string of additional supported methods in this env.\nFALCON_CUSTOM_HTTP_METHODS = [\n method.strip().upper()\n for method in os.environ.get('FALCON_CUSTOM_HTTP_METHODS', '').split(',')\n if method.strip() != ''\n]\n\nCOMBINED_METHODS = HTTP_METHODS + WEBDAV_METHODS + FALCON_CUSTOM_HTTP_METHODS\n\n# NOTE(kgriffs): According to RFC 7159, most JSON parsers assume\n# UTF-8 and so it is the recommended default charset going forward,\n# and indeed, other charsets should not be specified to ensure\n# maximum interoperability.\nMEDIA_JSON = 'application/json'\n\n# NOTE(kgriffs): An internet media type for MessagePack has not\n# yet been registered. 'application/x-msgpack' is commonly used,\n# but the use of the 'x-' prefix is discouraged by RFC 6838.\nMEDIA_MSGPACK = 'application/msgpack'\n\n# NOTE(kgriffs): An internet media type for YAML has not been\n# registered. RoR uses 'application/x-yaml', but since use of\n# 'x-' is discouraged by RFC 6838, we don't use it in Falcon.\n#\n# The YAML specification requires that parsers deduce the character\n# encoding by examining the first few bytes of the document itself.\n# Therefore, it does not make sense to include the charset in the\n# media type string.\nMEDIA_YAML = 'application/yaml'\n\n# NOTE(kgriffs): According to RFC 7303, when the charset is\n# omitted, preference is given to the encoding specified in the\n# document itself (either via a BOM, or via the XML declaration). If\n# the document does not explicitly specify the encoding, UTF-8 is\n# assumed. We do not specify the charset here, because many parsers\n# ignore it anyway and just use what is specified in the document,\n# contrary to the RFCs.\nMEDIA_XML = 'application/xml'\n\n\n# NOTE(kgriffs): RFC 4329 recommends application/* over text/.\n# futhermore, parsers are required to respect the Unicode\n# encoding signature, if present in the document, and to default\n# to UTF-8 when not present. 
Note, however, that implementations\n# are not required to support anything besides UTF-8, so it is\n# unclear how much utility an encoding signature (or the charset\n# parameter for that matter) has in practice.\nMEDIA_JS = 'application/javascript'\n\n# NOTE(kgriffs): According to RFC 6838, most text media types should\n# include the charset parameter.\nMEDIA_HTML = 'text/html; charset=utf-8'\nMEDIA_TEXT = 'text/plain; charset=utf-8'\n\nMEDIA_JPEG = 'image/jpeg'\nMEDIA_PNG = 'image/png'\nMEDIA_GIF = 'image/gif'\n\nDEFAULT_MEDIA_TYPE = MEDIA_JSON\n", "path": "falcon/constants.py"}]} | 1,570 | 227 |
gh_patches_debug_1925 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IndexError when deleting a column
## Description
<!-- A clear and concise description of what the bug is. -->
An IndexError occurs when deleting a column through the API. Most of the time the error occurs when deleting the first or second column of a table; deleting the last columns in a table does not seem to produce this error.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
- A column should be deleted
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
1. Delete the first or second column of a table via API. Example: api/v0/tables/1/columns/1/
2. Delete the first or second column of another table via API. Example: api/v0/tables/2/columns/0/
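For instance, step 1 above corresponds to a request along these lines (host and IDs are illustrative; the failure surfaces in the response/traceback shown in the screenshots):
```
import requests

# Illustrative host and IDs; mirrors step 1 of the reproduction above.
response = requests.delete("http://localhost:8000/api/v0/tables/1/columns/1/")
print(response.status_code, response.text)
```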
## Screenshots


## Environment
- OS: (_eg._ macOS 10.14.6; Fedora 32)
- Browser: (_eg._ Safari; Firefox)
- Browser Version: (_eg._ 13; 73)
- Other info:
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
</issue>
<code>
[start of db/columns/operations/select.py]
1 import warnings
2
3 from sqlalchemy import Table, MetaData, and_, select, text, func
4
5 from db.tables.operations.select import reflect_table_from_oid
6 from db.utils import execute_statement
7
8
9 def get_column_index_from_name(table_oid, column_name, engine, connection_to_use=None):
10 with warnings.catch_warnings():
11 warnings.filterwarnings("ignore", message="Did not recognize type")
12 pg_attribute = Table("pg_attribute", MetaData(), autoload_with=engine)
13 sel = select(pg_attribute.c.attnum).where(
14 and_(
15 pg_attribute.c.attrelid == table_oid,
16 pg_attribute.c.attname == column_name
17 )
18 )
19 result = execute_statement(engine, sel, connection_to_use).fetchone()[0]
20
21 # Account for dropped columns that don't appear in the SQLAlchemy tables
22 sel = (
23 select(func.count())
24 .where(and_(
25 pg_attribute.c.attisdropped.is_(True),
26 pg_attribute.c.attnum < result,
27 ))
28 )
29 dropped_count = execute_statement(engine, sel, connection_to_use).fetchone()[0]
30
31 return result - 1 - dropped_count
32
33
34 def get_column_default(table_oid, column_index, engine, connection_to_use=None):
35 table = reflect_table_from_oid(table_oid, engine, connection_to_use)
36 column = table.columns[column_index]
37 if column.server_default is None:
38 return None
39
40 metadata = MetaData()
41 with warnings.catch_warnings():
42 warnings.filterwarnings("ignore", message="Did not recognize type")
43 pg_attribute = Table("pg_attribute", metadata, autoload_with=engine)
44 pg_attrdef = Table("pg_attrdef", metadata, autoload_with=engine)
45
46 query = (
47 select(pg_attrdef.c.adbin)
48 .select_from(
49 pg_attrdef
50 .join(
51 pg_attribute,
52 and_(
53 pg_attribute.c.attnum == pg_attrdef.c.adnum,
54 pg_attribute.c.attrelid == pg_attrdef.c.adrelid
55 )
56 )
57 )
58 .where(and_(
59 pg_attribute.c.attrelid == table_oid,
60 pg_attribute.c.attname == column.name,
61 pg_attribute.c.attnum >= 1,
62 ))
63 )
64
65 result = execute_statement(engine, query, connection_to_use).first()[0]
66
67 # Here, we get the 'adbin' value for the current column, stored in the attrdef
68 # system table. The prefix of this value tells us whether the default is static
69 # ('{CONSTANT') or generated ('{FUNCEXPR'). We do not return generated defaults.
70 if result.startswith("{FUNCEXPR"):
71 return None
72
73 default_textual_sql = column.server_default.arg.text
74 # Defaults are stored as text with SQL casts appended
75 # Ex: "'test default string'::character varying" or "'2020-01-01'::date"
76 # Here, we execute the cast to get the proper python value
77 return execute_statement(engine, select(text(default_textual_sql)), connection_to_use).first()[0]
78
[end of db/columns/operations/select.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py
--- a/db/columns/operations/select.py
+++ b/db/columns/operations/select.py
@@ -22,6 +22,7 @@
sel = (
select(func.count())
.where(and_(
+ pg_attribute.c.attrelid == table_oid,
pg_attribute.c.attisdropped.is_(True),
pg_attribute.c.attnum < result,
))
| {"golden_diff": "diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py\n--- a/db/columns/operations/select.py\n+++ b/db/columns/operations/select.py\n@@ -22,6 +22,7 @@\n sel = (\n select(func.count())\n .where(and_(\n+ pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attisdropped.is_(True),\n pg_attribute.c.attnum < result,\n ))\n", "issue": "IndexError when deleting a column\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAn indexError occurs when deleting a column through the API. Most of the time the error occurs when deleting the first or second column of a table. Deleting the last columns in a table does not seem to produce this error. \r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n- A column should be deleted\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n1. Delete the first or second column of a table via API. Example: api/v0/tables/1/columns/1/\r\n2. Delete the first or second column of another table via API. Example: api/v0/tables/2/columns/0/\r\n\r\n## Screenshots\r\n\r\n\r\n\r\n\r\n## Environment\r\n - OS: (_eg._ macOS 10.14.6; Fedora 32)\r\n - Browser: (_eg._ Safari; Firefox)\r\n - Browser Version: (_eg._ 13; 73)\r\n - Other info:\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. -->\r\n\n", "before_files": [{"content": "import warnings\n\nfrom sqlalchemy import Table, MetaData, and_, select, text, func\n\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement\n\n\ndef get_column_index_from_name(table_oid, column_name, engine, connection_to_use=None):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", MetaData(), autoload_with=engine)\n sel = select(pg_attribute.c.attnum).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column_name\n )\n )\n result = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n # Account for dropped columns that don't appear in the SQLAlchemy tables\n sel = (\n select(func.count())\n .where(and_(\n pg_attribute.c.attisdropped.is_(True),\n pg_attribute.c.attnum < result,\n ))\n )\n dropped_count = execute_statement(engine, sel, connection_to_use).fetchone()[0]\n\n return result - 1 - dropped_count\n\n\ndef get_column_default(table_oid, column_index, engine, connection_to_use=None):\n table = reflect_table_from_oid(table_oid, engine, connection_to_use)\n column = table.columns[column_index]\n if column.server_default is None:\n return None\n\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_attribute = Table(\"pg_attribute\", metadata, autoload_with=engine)\n pg_attrdef = Table(\"pg_attrdef\", metadata, autoload_with=engine)\n\n query = (\n select(pg_attrdef.c.adbin)\n .select_from(\n pg_attrdef\n .join(\n pg_attribute,\n and_(\n pg_attribute.c.attnum == pg_attrdef.c.adnum,\n pg_attribute.c.attrelid == pg_attrdef.c.adrelid\n )\n )\n )\n .where(and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname == column.name,\n pg_attribute.c.attnum >= 1,\n ))\n )\n\n result = execute_statement(engine, query, connection_to_use).first()[0]\n\n # Here, we 
get the 'adbin' value for the current column, stored in the attrdef\n # system table. The prefix of this value tells us whether the default is static\n # ('{CONSTANT') or generated ('{FUNCEXPR'). We do not return generated defaults.\n if result.startswith(\"{FUNCEXPR\"):\n return None\n\n default_textual_sql = column.server_default.arg.text\n # Defaults are stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n return execute_statement(engine, select(text(default_textual_sql)), connection_to_use).first()[0]\n", "path": "db/columns/operations/select.py"}]} | 1,749 | 104 |
gh_patches_debug_59246 | rasdani/github-patches | git_diff | projectmesa__mesa-451 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Datacollector fix
Simplify the `DataCollector` API by allowing the user-provided model- and agent-level reporters to be the names of attributes, in addition to methods. e.g. instead of needing to write
```
agent_reporters={"Wealth": lambda a: a.wealth}
```
you can write
```
agent_reporters={"Wealth":"wealth"}
```
This PR implements this feature, and updates the tests and documentation accordingly.
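For illustration, a minimal sketch of the two reporter styles side by side; the import path reflects typical Mesa usage and `wealth` is assumed to be an attribute on the model's agents:
```
from mesa.datacollection import DataCollector

# Callable-based reporter (existing style)
dc_old = DataCollector(agent_reporters={"Wealth": lambda a: a.wealth})

# Attribute-name reporter (style enabled by this change)
dc_new = DataCollector(agent_reporters={"Wealth": "wealth"})
```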
</issue>
<code>
[start of mesa/__init__.py]
1 # -*- coding: utf-8 -*-
2 """
3 Mesa Agent-Based Modeling Framework
4
5 Core Objects: Model, and Agent.
6
7 """
8 import datetime
9
10 from .model import Model
11 from .agent import Agent
12
13
14 __all__ = ["Model", "Agent"]
15
16 __title__ = 'mesa'
17 __version__ = '0.8.2'
18 __license__ = 'Apache 2.0'
19 __copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year
20
[end of mesa/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesa/__init__.py b/mesa/__init__.py
--- a/mesa/__init__.py
+++ b/mesa/__init__.py
@@ -14,6 +14,6 @@
__all__ = ["Model", "Agent"]
__title__ = 'mesa'
-__version__ = '0.8.2'
+__version__ = '0.8.3'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year
| {"golden_diff": "diff --git a/mesa/__init__.py b/mesa/__init__.py\n--- a/mesa/__init__.py\n+++ b/mesa/__init__.py\n@@ -14,6 +14,6 @@\n __all__ = [\"Model\", \"Agent\"]\n \n __title__ = 'mesa'\n-__version__ = '0.8.2'\n+__version__ = '0.8.3'\n __license__ = 'Apache 2.0'\n __copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year\n", "issue": "Datacollector fix\nSimplify the `DataCollector` API by allowing the user-provided model- and agent-level reporters to be the names of attributes, in addition to methods. e.g. instead of needing to write\r\n\r\n```\r\nagent_reporters={\"Wealth\": lambda a: a.wealth}\r\n```\r\n\r\nyou can write\r\n```\r\nagent_reporters={\"Wealth\":\"wealth\"}\r\n```\r\n\r\nThis PR implements this feature, and updates the tests and documentation accordingly.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = 'mesa'\n__version__ = '0.8.2'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright %s Project Mesa Team' % datetime.date.today().year\n", "path": "mesa/__init__.py"}]} | 766 | 122 |
gh_patches_debug_31990 | rasdani/github-patches | git_diff | cloudtools__troposphere-1811 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add AWS::Elasticsearch::Domain DomainEndpointOptions
CloudFormation now allows you to specify additional options for the domain endpoint, such as whether to require HTTPS for all traffic, with an Elasticsearch Domain.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-domainendpointoptions.html
Adding support for this would be super.
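A sketch of how the new property might be used from troposphere once supported; the `DomainEndpointOptions` class and its keys follow the linked CloudFormation documentation, and the exact troposphere API shown here is an assumption:
```
from troposphere import Template
from troposphere.elasticsearch import Domain, DomainEndpointOptions  # assumed export

t = Template()
t.add_resource(Domain(
    "SearchDomain",
    DomainName="my-domain",
    DomainEndpointOptions=DomainEndpointOptions(
        EnforceHTTPS=True,                               # require HTTPS for all traffic
        TLSSecurityPolicy="Policy-Min-TLS-1-2-2019-07",  # minimum TLS version
    ),
))
print(t.to_json())
```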
</issue>
<code>
[start of troposphere/elasticsearch.py]
1 # Copyright (c) 2012-2015, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSProperty, AWSObject, Tags
7 from .compat import policytypes
8 from .validators import boolean, integer, integer_range, positive_integer
9
10 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
11
12
13 def validate_volume_type(volume_type):
14 """Validate VolumeType for ElasticsearchDomain"""
15 if volume_type not in VALID_VOLUME_TYPES:
16 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" %
17 ", ".join(VALID_VOLUME_TYPES))
18 return volume_type
19
20
21 class CognitoOptions(AWSProperty):
22 props = {
23 'Enabled': (boolean, False),
24 'IdentityPoolId': (basestring, False),
25 'RoleArn': (basestring, False),
26 'UserPoolId': (basestring, False),
27 }
28
29
30 class EBSOptions(AWSProperty):
31 props = {
32 'EBSEnabled': (boolean, False),
33 'Iops': (positive_integer, False),
34 'VolumeSize': (integer, False),
35 'VolumeType': (validate_volume_type, False)
36 }
37
38 def validate(self):
39 volume_type = self.properties.get('VolumeType')
40 iops = self.properties.get('Iops')
41 if volume_type == 'io1' and not iops:
42 raise ValueError("Must specify Iops if VolumeType is 'io1'.")
43
44
45 class ZoneAwarenessConfig(AWSProperty):
46 props = {
47 'AvailabilityZoneCount': (integer, False),
48 }
49
50
51 class ElasticsearchClusterConfig(AWSProperty):
52 props = {
53 'DedicatedMasterCount': (integer, False),
54 'DedicatedMasterEnabled': (boolean, False),
55 'DedicatedMasterType': (basestring, False),
56 'InstanceCount': (integer, False),
57 'InstanceType': (basestring, False),
58 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),
59 'ZoneAwarenessEnabled': (boolean, False)
60 }
61
62
63 class EncryptionAtRestOptions(AWSProperty):
64 props = {
65 'Enabled': (boolean, False),
66 'KmsKeyId': (basestring, False),
67 }
68
69
70 class NodeToNodeEncryptionOptions(AWSProperty):
71 props = {
72 'Enabled': (boolean, False),
73 }
74
75
76 class SnapshotOptions(AWSProperty):
77 props = {
78 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)
79 }
80
81
82 class VPCOptions(AWSProperty):
83 props = {
84 'SecurityGroupIds': ([basestring], False),
85 'SubnetIds': ([basestring], False)
86 }
87
88
89 class MasterUserOptions(AWSProperty):
90 props = {
91 'MasterUserARN': (basestring, False),
92 'MasterUserName': (basestring, False),
93 'MasterUserPassword': (basestring, False),
94 }
95
96
97 class AdvancedSecurityOptionsInput(AWSProperty):
98 props = {
99 'Enabled': (boolean, False),
100 'InternalUserDatabaseEnabled': (boolean, False),
101 'MasterUserOptions': (MasterUserOptions, False),
102 }
103
104
105 class Domain(AWSObject):
106 resource_type = "AWS::Elasticsearch::Domain"
107
108 props = {
109 'AccessPolicies': (policytypes, False),
110 'AdvancedOptions': (dict, False),
111 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
112 'CognitoOptions': (CognitoOptions, False),
113 'DomainName': (basestring, False),
114 'EBSOptions': (EBSOptions, False),
115 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
116 'ElasticsearchVersion': (basestring, False),
117 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),
118 'LogPublishingOptions': (dict, False),
119 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),
120 'SnapshotOptions': (SnapshotOptions, False),
121 'Tags': ((Tags, list), False),
122 'VPCOptions': (VPCOptions, False),
123 }
124
125
126 # Backward compatibility
127 ElasticsearchDomain = Domain
128
[end of troposphere/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -8,6 +8,10 @@
from .validators import boolean, integer, integer_range, positive_integer
VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
+VALID_TLS_SECURITY_POLICIES = (
+ 'Policy-Min-TLS-1-0-2019-07',
+ 'Policy-Min-TLS-1-2-2019-07'
+ )
def validate_volume_type(volume_type):
@@ -18,6 +22,14 @@
return volume_type
+def validate_tls_security_policy(tls_security_policy):
+ """Validate TLS Security Policy for ElasticsearchDomain"""
+ if tls_security_policy not in VALID_TLS_SECURITY_POLICIES:
+ raise ValueError("Minimum TLS Security Policy must be one of: %s" %
+ ", ".join(VALID_TLS_SECURITY_POLICIES))
+ return tls_security_policy
+
+
class CognitoOptions(AWSProperty):
props = {
'Enabled': (boolean, False),
@@ -27,6 +39,13 @@
}
+class DomainEndpointOptions(AWSProperty):
+ props = {
+ 'EnforceHTTPS': (boolean, False),
+ 'TLSSecurityPolicy': (validate_tls_security_policy, False),
+ }
+
+
class EBSOptions(AWSProperty):
props = {
'EBSEnabled': (boolean, False),
@@ -111,6 +130,7 @@
'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
'CognitoOptions': (CognitoOptions, False),
'DomainName': (basestring, False),
+ 'DomainEndpointOptions': (DomainEndpointOptions, False),
'EBSOptions': (EBSOptions, False),
'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
'ElasticsearchVersion': (basestring, False),
| {"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -8,6 +8,10 @@\n from .validators import boolean, integer, integer_range, positive_integer\n \n VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n+VALID_TLS_SECURITY_POLICIES = (\n+ 'Policy-Min-TLS-1-0-2019-07',\n+ 'Policy-Min-TLS-1-2-2019-07'\n+ )\n \n \n def validate_volume_type(volume_type):\n@@ -18,6 +22,14 @@\n return volume_type\n \n \n+def validate_tls_security_policy(tls_security_policy):\n+ \"\"\"Validate TLS Security Policy for ElasticsearchDomain\"\"\"\n+ if tls_security_policy not in VALID_TLS_SECURITY_POLICIES:\n+ raise ValueError(\"Minimum TLS Security Policy must be one of: %s\" %\n+ \", \".join(VALID_TLS_SECURITY_POLICIES))\n+ return tls_security_policy\n+\n+\n class CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n@@ -27,6 +39,13 @@\n }\n \n \n+class DomainEndpointOptions(AWSProperty):\n+ props = {\n+ 'EnforceHTTPS': (boolean, False),\n+ 'TLSSecurityPolicy': (validate_tls_security_policy, False),\n+ }\n+\n+\n class EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n@@ -111,6 +130,7 @@\n 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n+ 'DomainEndpointOptions': (DomainEndpointOptions, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n", "issue": "Add AWS::Elasticsearch::Domain DomainEndpointOptions \nCloudformation now allows you to specify additional options for the domain endpoint, such as whether to require HTTPS for all traffic, with an Elasticseach Domain. \r\n\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-domainendpointoptions.html\r\n\r\nAdding support for this would be super. \n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSProperty, AWSObject, Tags\nfrom .compat import policytypes\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'IdentityPoolId': (basestring, False),\n 'RoleArn': (basestring, False),\n 'UserPoolId': (basestring, False),\n }\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ZoneAwarenessConfig(AWSProperty):\n props = {\n 'AvailabilityZoneCount': (integer, False),\n }\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass EncryptionAtRestOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'KmsKeyId': (basestring, False),\n }\n\n\nclass NodeToNodeEncryptionOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass VPCOptions(AWSProperty):\n props = {\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetIds': ([basestring], False)\n }\n\n\nclass MasterUserOptions(AWSProperty):\n props = {\n 'MasterUserARN': (basestring, False),\n 'MasterUserName': (basestring, False),\n 'MasterUserPassword': (basestring, False),\n }\n\n\nclass AdvancedSecurityOptionsInput(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'InternalUserDatabaseEnabled': (boolean, False),\n 'MasterUserOptions': (MasterUserOptions, False),\n }\n\n\nclass Domain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),\n 'LogPublishingOptions': (dict, False),\n 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': ((Tags, list), False),\n 'VPCOptions': (VPCOptions, False),\n }\n\n\n# Backward compatibility\nElasticsearchDomain = Domain\n", "path": "troposphere/elasticsearch.py"}]} | 1,812 | 452 |
gh_patches_debug_12129 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filters not applied when calculating count of items within group
## Reproduce
1. Go to the Library Management schema.
1. Load the Table Page for the Publications table.
1. Group by "Publication Year".
1. Observe the first group, for year 1900, to contain 10 records and to display a "Count" of 10. Good.
1. Add a filter condition requiring Title to contain the string "To".
1. Observe the first group, for year 1900, to contain 2 records.
1. Expect "Count" to display 2.
1. Observe "Count" displays 10.

</issue>
<code>
[start of db/transforms/operations/apply.py]
1 from db.transforms.base import enforce_relation_type_expectations, Transform
2 from db.transforms import base
3
4
5 def apply_transformations(relation, transformations):
6 enforce_relation_type_expectations(relation)
7 for transform in transformations:
8 relation = _apply_transform(relation, transform)
9 return relation
10
11
12 def _apply_transform(relation, transform):
13 assert isinstance(transform, Transform)
14 relation = transform.apply_to_relation(relation)
15 enforce_relation_type_expectations(relation)
16 return relation
17
18
19 # NOTE deprecated; this will be replaced with apply_transformations
20 def apply_transformations_deprecated(
21 table,
22 limit=None,
23 offset=None,
24 order_by=None,
25 filter=None,
26 columns_to_select=None,
27 group_by=None,
28 duplicate_only=None,
29 search=[],
30 ):
31 # TODO rename the actual method parameter
32 relation = table
33
34 enforce_relation_type_expectations(relation)
35
36 transforms = []
37
38 if duplicate_only:
39 transforms.append(base.DuplicateOnly(duplicate_only))
40 if group_by:
41 transforms.append(base.Group(group_by))
42 if order_by:
43 transforms.append(base.Order(order_by))
44 if filter:
45 transforms.append(base.Filter(filter))
46 if search:
47 transforms.append(base.Search([search, limit]))
48 if columns_to_select:
49 transforms.append(base.SelectSubsetOfColumns(columns_to_select))
50 if offset:
51 transforms.append(base.Offset(offset))
52 if limit:
53 transforms.append(base.Limit(limit))
54
55 relation = apply_transformations(relation, transforms)
56 return relation
57
[end of db/transforms/operations/apply.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py
--- a/db/transforms/operations/apply.py
+++ b/db/transforms/operations/apply.py
@@ -35,14 +35,14 @@
transforms = []
+ if filter:
+ transforms.append(base.Filter(filter))
if duplicate_only:
transforms.append(base.DuplicateOnly(duplicate_only))
if group_by:
transforms.append(base.Group(group_by))
if order_by:
transforms.append(base.Order(order_by))
- if filter:
- transforms.append(base.Filter(filter))
if search:
transforms.append(base.Search([search, limit]))
if columns_to_select:
| {"golden_diff": "diff --git a/db/transforms/operations/apply.py b/db/transforms/operations/apply.py\n--- a/db/transforms/operations/apply.py\n+++ b/db/transforms/operations/apply.py\n@@ -35,14 +35,14 @@\n \n transforms = []\n \n+ if filter:\n+ transforms.append(base.Filter(filter))\n if duplicate_only:\n transforms.append(base.DuplicateOnly(duplicate_only))\n if group_by:\n transforms.append(base.Group(group_by))\n if order_by:\n transforms.append(base.Order(order_by))\n- if filter:\n- transforms.append(base.Filter(filter))\n if search:\n transforms.append(base.Search([search, limit]))\n if columns_to_select:\n", "issue": "Filters not applied when calculating count of items within group\n## Reproduce\n\n1. Go to the Library Management schema.\n1. Load the Table Page for the Publications table.\n1. Group by \"Publication Year\".\n1. Observe the first group, for year 1900, to contain 10 records and to display a \"Count\" of 10. Good.\n1. Add a filter condition requiring Title to contain the string \"To\".\n1. Observe the first group, for year 1900, to contain 2 records.\n1. Expect \"Count\" to display 2.\n1. Observe \"Count\" displays 10.\n\n\n\n", "before_files": [{"content": "from db.transforms.base import enforce_relation_type_expectations, Transform\nfrom db.transforms import base\n\n\ndef apply_transformations(relation, transformations):\n enforce_relation_type_expectations(relation)\n for transform in transformations:\n relation = _apply_transform(relation, transform)\n return relation\n\n\ndef _apply_transform(relation, transform):\n assert isinstance(transform, Transform)\n relation = transform.apply_to_relation(relation)\n enforce_relation_type_expectations(relation)\n return relation\n\n\n# NOTE deprecated; this will be replaced with apply_transformations\ndef apply_transformations_deprecated(\n table,\n limit=None,\n offset=None,\n order_by=None,\n filter=None,\n columns_to_select=None,\n group_by=None,\n duplicate_only=None,\n search=[],\n):\n # TODO rename the actual method parameter\n relation = table\n\n enforce_relation_type_expectations(relation)\n\n transforms = []\n\n if duplicate_only:\n transforms.append(base.DuplicateOnly(duplicate_only))\n if group_by:\n transforms.append(base.Group(group_by))\n if order_by:\n transforms.append(base.Order(order_by))\n if filter:\n transforms.append(base.Filter(filter))\n if search:\n transforms.append(base.Search([search, limit]))\n if columns_to_select:\n transforms.append(base.SelectSubsetOfColumns(columns_to_select))\n if offset:\n transforms.append(base.Offset(offset))\n if limit:\n transforms.append(base.Limit(limit))\n\n relation = apply_transformations(relation, transforms)\n return relation\n", "path": "db/transforms/operations/apply.py"}]} | 1,164 | 153 |
gh_patches_debug_1479 | rasdani/github-patches | git_diff | fidals__shopelectro-870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add absolute urls to the canonical links. stb2
The way canonical links are built on the site needs to be fixed.
The address in a canonical link must be absolute:
<link rel="canonical" href="https://www.сайт.ру/адрес_страницы" >
and not like this:
<link rel="canonical" href="/адрес_страницы" > - this is incorrect
Search engines ignore this tag if it contains a relative address...
When I crawl the site I see many duplicate pages (pagination); the canonical tag is present in the markup, yet pages are not recognized as canonical when scanned the way a search robot would scan them.
Most likely the same needs to be done on STB.
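A minimal sketch of building the absolute address on the Python side, assuming `settings.BASE_URL` holds the scheme and host as in the context processor listed below (other names are illustrative):
```
from django.conf import settings

def canonical_url(request):
    # BASE_URL like 'https://www.example.com', request.path like '/catalog/?page=2'
    return f"{settings.BASE_URL}{request.path}"
```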
</issue>
<code>
[start of shopelectro/context_processors.py]
1 from django.conf import settings
2
3
4 def shop(request):
5 """
6 Inject shop dict into request.
7
8 Shop dict contains information about shop:
9 emails, phones, API-integrations.
10 """
11 return {
12 'shop': settings.SHOP,
13 'DEBUG': settings.DEBUG,
14 'BASE_URL': settings.BASE_URL,
15 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,
16 }
17
[end of shopelectro/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/context_processors.py b/shopelectro/context_processors.py
--- a/shopelectro/context_processors.py
+++ b/shopelectro/context_processors.py
@@ -11,6 +11,6 @@
return {
'shop': settings.SHOP,
'DEBUG': settings.DEBUG,
- 'BASE_URL': settings.BASE_URL,
+ 'base_url': settings.BASE_URL,
'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,
}
| {"golden_diff": "diff --git a/shopelectro/context_processors.py b/shopelectro/context_processors.py\n--- a/shopelectro/context_processors.py\n+++ b/shopelectro/context_processors.py\n@@ -11,6 +11,6 @@\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n- 'BASE_URL': settings.BASE_URL,\n+ 'base_url': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n", "issue": "Add absolute urls to the canonical links. stb2\n\u041d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e \u043f\u043e\u043f\u0440\u0430\u0432\u0438\u0442\u044c \u043f\u043e\u0441\u0442\u0440\u043e\u0435\u043d\u0438\u0435 \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u0438\u0445 \u0441\u0441\u044b\u043b\u043e\u043a \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\r\n\u0410\u0434\u0440\u0435\u0441 \u0432 \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u043e\u0439 \u0441\u0441\u044b\u043b\u043a\u0435 \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u043e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e \u0430\u0431\u0441\u043e\u043b\u044e\u0442\u043d\u044b\u0439\r\n<link rel=\"canonical\" href=\"https://www.\u0441\u0430\u0439\u0442.\u0440\u0443/\u0430\u0434\u0440\u0435\u0441_\u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b\" >\r\n\u0430 \u043d\u0435 \u0442\u0430\u043a\r\n<link rel=\"canonical\" href=\"/\u0430\u0434\u0440\u0435\u0441_\u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b\" > - \u044d\u0442\u043e \u043d\u0435\u0432\u0435\u0440\u043d\u043e\r\n\u041f\u043e\u0438\u0441\u043a\u043e\u0432\u0438\u043a\u0438 \u0438\u0433\u043d\u043e\u0440\u0438\u0440\u0443\u044e\u0442 \u044d\u0442\u043e\u0442 \u0442\u0435\u0433, \u0435\u0441\u043b\u0438 \u0443\u043a\u0430\u0437\u0430\u043d \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0439 \u0430\u0434\u0440\u0435\u0441 \u0432 \u0442\u0435\u0433\u0435...\r\n\u0423 \u043c\u0435\u043d\u044f \u043f\u0440\u0438 \u0441\u043a\u0430\u043d\u0435 \u043f\u043e\u044f\u0432\u043b\u044f\u0435\u0442\u0441\u044f \u043c\u043d\u043e\u0433\u043e \u0441\u0442\u0440\u0430\u043d\u0438\u0446 \u0434\u0443\u0431\u043b\u0435\u0439 (\u043f\u0430\u0433\u0438\u043d\u0430\u0446\u0438\u044f), \u0432 \u043a\u043e\u0434\u0435 \u0443\u043a\u0430\u0437\u0430\u043d \u043a\u0430\u043d\u043e\u043d\u0438\u043a\u0430\u043b. \u0430 \u043f\u0440\u0438 \u0441\u043a\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0438 \u043c\u0435\u0442\u043e\u0434\u043e\u043c \u0430\u043d\u0430\u043b\u043e\u0433\u0438\u0447\u043d\u044b\u043c \u043f\u043e\u0438\u0441\u043a\u043e\u0432\u043e\u043c\u0443 \u0440\u043e\u0431\u043e\u0442\u0443 \u0441\u0440\u0430\u043d\u0438\u0446\u044b \u043a\u0430\u043a \u043a\u0430\u043d\u043e\u043d\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u043d\u0435 \u043f\u043e\u043c\u0435\u0447\u0430\u044e\u0442\u0441\u044f\r\n\r\n\u0412\u0435\u0440\u043e\u044f\u0442\u043d\u043e, \u043d\u0430 STB \u043d\u0443\u0436\u043d\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c \u0442\u0430\u043a \u0436\u0435. \n", "before_files": [{"content": "from django.conf import settings\n\n\ndef shop(request):\n \"\"\"\n Inject shop dict into request.\n\n Shop dict contains information about shop:\n emails, phones, API-integrations.\n \"\"\"\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n 'BASE_URL': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n", "path": "shopelectro/context_processors.py"}]} | 844 | 108 |
gh_patches_debug_16334 | rasdani/github-patches | git_diff | falconry__falcon-741 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor setup.py external module selection for Cython
Reduce complexity and duplication of the external module selection for Cython in setup.py.
At the time of this issue, this cruft was located in:
https://github.com/kgriffs/falcon/blob/routing/setup.py#L35
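One possible shape for the consolidation, sketched only to show the intent (whether the project settles on this exact form is open); it reuses the `list_modules` helper and names already present in the setup.py listed below:
```
package_names = ['falcon', 'falcon.util', 'falcon.routing']
ext_modules = [
    Extension(package + '.' + module,
              [path.join(*(package.split('.') + [module + '.py']))])
    for package in package_names
    for module in list_modules(path.join(MYDIR, *package.split('.')))
]
```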
</issue>
<code>
[start of setup.py]
1 import glob
2 import imp
3 import io
4 import os
5 from os import path
6 from setuptools import setup, find_packages, Extension
7 import sys
8
9 MYDIR = path.abspath(os.path.dirname(__file__))
10
11 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
12 VERSION = VERSION.__version__
13
14 # NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3
15 # TODO(kgriffs): Fork and optimize/modernize python-mimeparse
16 REQUIRES = ['six>=1.4.0', 'python-mimeparse']
17
18 JYTHON = 'java' in sys.platform
19
20 try:
21 sys.pypy_version_info
22 PYPY = True
23 except AttributeError:
24 PYPY = False
25
26 if PYPY or JYTHON:
27 CYTHON = False
28 else:
29 try:
30 from Cython.Distutils import build_ext
31 CYTHON = True
32 except ImportError:
33 # TODO(kgriffs): pip now ignores all output, so the user
34 # may not see this message. See also:
35 #
36 # https://github.com/pypa/pip/issues/2732
37 #
38 print('\nNOTE: Cython not installed. '
39 'Falcon will still work fine, but may run '
40 'a bit slower.\n')
41 CYTHON = False
42
43 if CYTHON:
44 def list_modules(dirname):
45 filenames = glob.glob(path.join(dirname, '*.py'))
46
47 module_names = []
48 for name in filenames:
49 module, ext = path.splitext(path.basename(name))
50 if module != '__init__':
51 module_names.append(module)
52
53 return module_names
54
55 ext_modules = [
56 Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
57 for ext in list_modules(path.join(MYDIR, 'falcon'))]
58
59 ext_modules += [
60 Extension('falcon.util.' + ext,
61 [path.join('falcon', 'util', ext + '.py')])
62
63 for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
64
65 ext_modules += [
66 Extension('falcon.routing.' + ext,
67 [path.join('falcon', 'routing', ext + '.py')])
68
69 for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]
70
71 cmdclass = {'build_ext': build_ext}
72
73 else:
74 cmdclass = {}
75 ext_modules = []
76
77 setup(
78 name='falcon',
79 version=VERSION,
80 description='An unladen web framework for building APIs and app backends.',
81 long_description=io.open('README.rst', 'r', encoding='utf-8').read(),
82 classifiers=[
83 'Development Status :: 5 - Production/Stable',
84 'Environment :: Web Environment',
85 'Natural Language :: English',
86 'Intended Audience :: Developers',
87 'Intended Audience :: System Administrators',
88 'License :: OSI Approved :: Apache Software License',
89 'Operating System :: MacOS :: MacOS X',
90 'Operating System :: Microsoft :: Windows',
91 'Operating System :: POSIX',
92 'Topic :: Internet :: WWW/HTTP :: WSGI',
93 'Topic :: Software Development :: Libraries :: Application Frameworks',
94 'Programming Language :: Python',
95 'Programming Language :: Python :: Implementation :: CPython',
96 'Programming Language :: Python :: Implementation :: PyPy',
97 'Programming Language :: Python :: Implementation :: Jython',
98 'Programming Language :: Python :: 2.6',
99 'Programming Language :: Python :: 2.7',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Programming Language :: Python :: 3.5',
103 ],
104 keywords='wsgi web api framework rest http cloud',
105 author='Kurt Griffiths',
106 author_email='[email protected]',
107 url='http://falconframework.org',
108 license='Apache 2.0',
109 packages=find_packages(exclude=['tests']),
110 include_package_data=True,
111 zip_safe=False,
112 install_requires=REQUIRES,
113 setup_requires=[],
114 cmdclass=cmdclass,
115 ext_modules=ext_modules,
116 test_suite='nose.collector',
117 entry_points={
118 'console_scripts': [
119 'falcon-bench = falcon.cmd.bench:main'
120 ]
121 }
122 )
123
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,21 +52,15 @@
return module_names
+ package_names = ['falcon', 'falcon.util', 'falcon.routing']
ext_modules = [
- Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
- for ext in list_modules(path.join(MYDIR, 'falcon'))]
-
- ext_modules += [
- Extension('falcon.util.' + ext,
- [path.join('falcon', 'util', ext + '.py')])
-
- for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
-
- ext_modules += [
- Extension('falcon.routing.' + ext,
- [path.join('falcon', 'routing', ext + '.py')])
-
- for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]
+ Extension(
+ package + '.' + module,
+ [path.join(*(package.split('.') + [module + '.py']))]
+ )
+ for package in package_names
+ for module in list_modules(path.join(MYDIR, *package.split('.')))
+ ]
cmdclass = {'build_ext': build_ext}
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,21 +52,15 @@\n \n return module_names\n \n+ package_names = ['falcon', 'falcon.util', 'falcon.routing']\n ext_modules = [\n- Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n- for ext in list_modules(path.join(MYDIR, 'falcon'))]\n-\n- ext_modules += [\n- Extension('falcon.util.' + ext,\n- [path.join('falcon', 'util', ext + '.py')])\n-\n- for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n-\n- ext_modules += [\n- Extension('falcon.routing.' + ext,\n- [path.join('falcon', 'routing', ext + '.py')])\n-\n- for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]\n+ Extension(\n+ package + '.' + module,\n+ [path.join(*(package.split('.') + [module + '.py']))]\n+ )\n+ for package in package_names\n+ for module in list_modules(path.join(MYDIR, *package.split('.')))\n+ ]\n \n cmdclass = {'build_ext': build_ext}\n", "issue": "Refactor setup.py external module selection for Cython\nReduce complexity and duplication of the external module selection for Cython in setup.py.\n\nAt the time of this issue, this cruft was located in: \nhttps://github.com/kgriffs/falcon/blob/routing/setup.py#L35\n\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse']\n\nJYTHON = 'java' in sys.platform\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY or JYTHON:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n for ext in list_modules(path.join(MYDIR, 'falcon'))]\n\n ext_modules += [\n Extension('falcon.util.' + ext,\n [path.join('falcon', 'util', ext + '.py')])\n\n for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n\n ext_modules += [\n Extension('falcon.routing.' 
+ ext,\n [path.join('falcon', 'routing', ext + '.py')])\n\n for ext in list_modules(path.join(MYDIR, 'falcon', 'routing'))]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 1,790 | 292 |
gh_patches_debug_89 | rasdani/github-patches | git_diff | encode__httpx-286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
stop nox on first error
While nox is running, if a session fails, it keeps running the remaining sessions. If we add `nox.options.stop_on_first_error = True`, we won't overlook check errors (or other failures) before a PR.
</issue>
<code>
[start of noxfile.py]
1 import nox
2
3 source_files = ("httpx", "tests", "setup.py", "noxfile.py")
4
5
6 @nox.session(reuse_venv=True)
7 def lint(session):
8 session.install("autoflake", "black", "flake8", "isort", "seed-isort-config")
9
10 session.run("autoflake", "--in-place", "--recursive", *source_files)
11 session.run("seed-isort-config", "--application-directories=httpx")
12 session.run("isort", "--project=httpx", "--recursive", "--apply", *source_files)
13 session.run("black", "--target-version=py36", *source_files)
14
15 check(session)
16
17
18 @nox.session(reuse_venv=True)
19 def check(session):
20 session.install(
21 "black", "flake8", "flake8-bugbear", "flake8-comprehensions", "mypy"
22 )
23
24 session.run("black", "--check", "--diff", "--target-version=py36", *source_files)
25 session.run("flake8", *source_files)
26 session.run("mypy", "httpx")
27
28
29 @nox.session(reuse_venv=True)
30 def docs(session):
31 session.install("mkdocs", "mkdocs-material")
32
33 session.run("mkdocs", "build")
34
35
36 @nox.session(python=["3.6", "3.7", "3.8"])
37 def test(session):
38 session.install("-r", "test-requirements.txt")
39
40 session.run("python", "-m", "pytest")
41
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,5 +1,7 @@
import nox
+nox.options.stop_on_first_error = True
+
source_files = ("httpx", "tests", "setup.py", "noxfile.py")
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -1,5 +1,7 @@\n import nox\n \n+nox.options.stop_on_first_error = True\n+\n source_files = (\"httpx\", \"tests\", \"setup.py\", \"noxfile.py\")\n", "issue": "stop nox on first error\nDuring nox is running, if it gets an error, continues running. if we add this: `nox.options.stop_on_first_error = True`, we don't overlook check errors or others before PR.\n", "before_files": [{"content": "import nox\n\nsource_files = (\"httpx\", \"tests\", \"setup.py\", \"noxfile.py\")\n\n\[email protected](reuse_venv=True)\ndef lint(session):\n session.install(\"autoflake\", \"black\", \"flake8\", \"isort\", \"seed-isort-config\")\n\n session.run(\"autoflake\", \"--in-place\", \"--recursive\", *source_files)\n session.run(\"seed-isort-config\", \"--application-directories=httpx\")\n session.run(\"isort\", \"--project=httpx\", \"--recursive\", \"--apply\", *source_files)\n session.run(\"black\", \"--target-version=py36\", *source_files)\n\n check(session)\n\n\[email protected](reuse_venv=True)\ndef check(session):\n session.install(\n \"black\", \"flake8\", \"flake8-bugbear\", \"flake8-comprehensions\", \"mypy\"\n )\n\n session.run(\"black\", \"--check\", \"--diff\", \"--target-version=py36\", *source_files)\n session.run(\"flake8\", *source_files)\n session.run(\"mypy\", \"httpx\")\n\n\[email protected](reuse_venv=True)\ndef docs(session):\n session.install(\"mkdocs\", \"mkdocs-material\")\n\n session.run(\"mkdocs\", \"build\")\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef test(session):\n session.install(\"-r\", \"test-requirements.txt\")\n\n session.run(\"python\", \"-m\", \"pytest\")\n", "path": "noxfile.py"}]} | 986 | 73 |
gh_patches_debug_24283 | rasdani/github-patches | git_diff | ESMCI__cime-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove ESMF from driver code
</issue>
<code>
[start of utils/python/CIME/SystemTests/pfs.py]
1 """
2 CIME performance test This class inherits from SystemTestsCommon
3
4 20 days performance test, no restart files written
5 """
6
7 from CIME.XML.standard_module_setup import *
8 from system_tests_common import SystemTestsCommon
9
10 logger = logging.getLogger(__name__)
11
12 class PFS(SystemTestsCommon):
13
14 def __init__(self, case):
15 """
16 initialize an object interface to the PFS system test
17 """
18 SystemTestsCommon.__init__(self, case)
19
20 def run(self):
21 self._case_set_value("STOP_OPTION", "ndays")
22 self._case.set_value("STOP_N", 20)
23 self._case.set_value("REST_OPTION","none")
24 self._case.set_value("CONTINUE_RUN", False)
25 self._case.flush()
26
27 logger.info("doing an 20 day initial test, no restarts written")
28 return SystemTestsCommon._run(self)
29
30 def report(self):
31 SystemTestsCommon.report(self)
32
[end of utils/python/CIME/SystemTests/pfs.py]
[start of utils/python/CIME/preview_namelists.py]
1 """
2 API for preview namelist
3 """
4
5 from CIME.XML.standard_module_setup import *
6 from CIME.utils import expect, run_cmd
7 from CIME.XML.env_mach_specific import EnvMachSpecific
8
9 import glob, shutil
10 logger = logging.getLogger(__name__)
11
12 def preview_namelists(case, dryrun=False, casedir=None):
13 # refresh case xml files from object
14 case.flush()
15
16 # Get data from XML
17 exeroot = case.get_value("EXEROOT")
18 libroot = case.get_value("LIBROOT")
19 incroot = case.get_value("INCROOT")
20 rundir = case.get_value("RUNDIR")
21 caseroot = case.get_value("CASEROOT")
22 casebuild = case.get_value("CASEBUILD")
23 testcase = case.get_value("TESTCASE")
24
25 logger.debug("LID is: '%s'" % os.getenv("LID", ""))
26 logger.debug("caseroot is: '%s'" % caseroot)
27
28 dryrun = True if (testcase == "SBN") else dryrun
29
30 models = ["atm", "lnd", "ice", "ocn", "glc", "wav", "rof", "cpl"]
31 docdir = os.path.join(caseroot, "CaseDocs")
32
33 if (dryrun):
34 # Only create rundir
35 try:
36 os.makedirs(rundir)
37 except OSError:
38 logger.warning("Not able to create $RUNDIR, trying a subdirectory of $CASEROOT")
39 rundir = os.path.join(caseroot, rundir)
40 try:
41 os.makedirs(rundir)
42 logger.info("Success! Setting RUNDIR=%s" % rundir)
43 case.set_value("RUNDIR", rundir)
44 except OSError:
45 expect(False, "Could not create rundir")
46
47 else:
48
49 # Load modules
50 env_module = case._get_env("mach_specific")
51 env_module.load_env_for_case(compiler=case.get_value("COMPILER"),
52 debug=case.get_value("DEBUG"),
53 mpilib=case.get_value("MPILIB"))
54
55 # Make necessary directories
56 dirs_to_make = [os.path.join(exeroot, model, "obj") for model in models]
57 dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])
58
59 for dir_to_make in dirs_to_make:
60 if (not os.path.isdir(dir_to_make)):
61 try:
62 logger.debug("Making dir '%s'" % dir_to_make)
63 os.makedirs(dir_to_make)
64 except OSError as e:
65 expect(False, "Could not make directory '%s', error: %s" % (dir_to_make, e))
66
67 # Create namelists
68 for model in models:
69 model_str = "drv" if model == "cpl" else model
70 config_file = case.get_value("CONFIG_%s_FILE" % model_str.upper())
71 config_dir = os.path.dirname(config_file)
72 cmd = os.path.join(config_dir, "buildnml")
73 logger.info("Running %s"%cmd)
74 if (logger.level == logging.DEBUG):
75 run_cmd("PREVIEW_NML=1 %s %s" % (cmd, caseroot))
76 else:
77 run_cmd("%s %s" % (cmd, caseroot))
78 # refresh case xml object from file
79 case.read_xml(caseroot)
80 # Save namelists to docdir
81 if (not os.path.isdir(docdir)):
82 os.makedirs(docdir)
83 try:
84 with open(os.path.join(docdir, "README"), "w") as fd:
85 fd.write(" CESM Resolved Namelist Files\n For documentation only DO NOT MODIFY\n")
86 except (OSError, IOError) as e:
87 expect(False, "Failed to write %s/README: %s" % (docdir, e))
88
89
90 for cpglob in ["*_in_[0-9]*", "*modelio*", "*_in",
91 "*streams*txt*", "*stxt", "*maps.rc", "*cism.config*"]:
92 for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):
93 logger.debug("Copy file from '%s' to '%s'" % (file_to_copy, docdir))
94 shutil.copy2(file_to_copy, docdir)
95
96 # Copy over chemistry mechanism docs if they exist
97 if (os.path.isdir(os.path.join(casebuild, "camconf"))):
98 for file_to_copy in glob.glob(os.path.join(casebuild, "camconf", "*chem_mech*")):
99 shutil.copy2(file_to_copy, docdir)
100
[end of utils/python/CIME/preview_namelists.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/SystemTests/pfs.py b/utils/python/CIME/SystemTests/pfs.py
--- a/utils/python/CIME/SystemTests/pfs.py
+++ b/utils/python/CIME/SystemTests/pfs.py
@@ -18,7 +18,7 @@
SystemTestsCommon.__init__(self, case)
def run(self):
- self._case_set_value("STOP_OPTION", "ndays")
+ self._case.set_value("STOP_OPTION", "ndays")
self._case.set_value("STOP_N", 20)
self._case.set_value("REST_OPTION","none")
self._case.set_value("CONTINUE_RUN", False)
diff --git a/utils/python/CIME/preview_namelists.py b/utils/python/CIME/preview_namelists.py
--- a/utils/python/CIME/preview_namelists.py
+++ b/utils/python/CIME/preview_namelists.py
@@ -72,9 +72,12 @@
cmd = os.path.join(config_dir, "buildnml")
logger.info("Running %s"%cmd)
if (logger.level == logging.DEBUG):
- run_cmd("PREVIEW_NML=1 %s %s" % (cmd, caseroot))
+ rc, out, err = run_cmd("PREVIEW_NML=1 %s %s" % (cmd, caseroot), ok_to_fail=True)
+ expect(rc==0,"Command %s failed rc=%d\nout=%s\nerr=%s"%(cmd,rc,out,err))
else:
- run_cmd("%s %s" % (cmd, caseroot))
+ rc, out, err = run_cmd("%s %s" % (cmd, caseroot), ok_to_fail=True)
+ expect(rc==0,"Command %s failed rc=%d\nout=%s\nerr=%s"%(cmd,rc,out,err))
+
# refresh case xml object from file
case.read_xml(caseroot)
# Save namelists to docdir
| {"golden_diff": "diff --git a/utils/python/CIME/SystemTests/pfs.py b/utils/python/CIME/SystemTests/pfs.py\n--- a/utils/python/CIME/SystemTests/pfs.py\n+++ b/utils/python/CIME/SystemTests/pfs.py\n@@ -18,7 +18,7 @@\n SystemTestsCommon.__init__(self, case)\n \n def run(self):\n- self._case_set_value(\"STOP_OPTION\", \"ndays\")\n+ self._case.set_value(\"STOP_OPTION\", \"ndays\")\n self._case.set_value(\"STOP_N\", 20)\n self._case.set_value(\"REST_OPTION\",\"none\")\n self._case.set_value(\"CONTINUE_RUN\", False)\ndiff --git a/utils/python/CIME/preview_namelists.py b/utils/python/CIME/preview_namelists.py\n--- a/utils/python/CIME/preview_namelists.py\n+++ b/utils/python/CIME/preview_namelists.py\n@@ -72,9 +72,12 @@\n cmd = os.path.join(config_dir, \"buildnml\")\n logger.info(\"Running %s\"%cmd)\n if (logger.level == logging.DEBUG):\n- run_cmd(\"PREVIEW_NML=1 %s %s\" % (cmd, caseroot))\n+ rc, out, err = run_cmd(\"PREVIEW_NML=1 %s %s\" % (cmd, caseroot), ok_to_fail=True)\n+ expect(rc==0,\"Command %s failed rc=%d\\nout=%s\\nerr=%s\"%(cmd,rc,out,err))\n else:\n- run_cmd(\"%s %s\" % (cmd, caseroot))\n+ rc, out, err = run_cmd(\"%s %s\" % (cmd, caseroot), ok_to_fail=True)\n+ expect(rc==0,\"Command %s failed rc=%d\\nout=%s\\nerr=%s\"%(cmd,rc,out,err))\n+\n # refresh case xml object from file\n case.read_xml(caseroot)\n # Save namelists to docdir\n", "issue": "Remove ESMF from driver code\n\n", "before_files": [{"content": "\"\"\"\nCIME performance test This class inherits from SystemTestsCommon\n\n20 days performance test, no restart files written\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom system_tests_common import SystemTestsCommon\n\nlogger = logging.getLogger(__name__)\n\nclass PFS(SystemTestsCommon):\n\n def __init__(self, case):\n \"\"\"\n initialize an object interface to the PFS system test\n \"\"\"\n SystemTestsCommon.__init__(self, case)\n\n def run(self):\n self._case_set_value(\"STOP_OPTION\", \"ndays\")\n self._case.set_value(\"STOP_N\", 20)\n self._case.set_value(\"REST_OPTION\",\"none\")\n self._case.set_value(\"CONTINUE_RUN\", False)\n self._case.flush()\n\n logger.info(\"doing an 20 day initial test, no restarts written\")\n return SystemTestsCommon._run(self)\n\n def report(self):\n SystemTestsCommon.report(self)\n", "path": "utils/python/CIME/SystemTests/pfs.py"}, {"content": "\"\"\"\nAPI for preview namelist\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_cmd\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nimport glob, shutil\nlogger = logging.getLogger(__name__)\n\ndef preview_namelists(case, dryrun=False, casedir=None):\n # refresh case xml files from object\n case.flush()\n\n # Get data from XML\n exeroot = case.get_value(\"EXEROOT\")\n libroot = case.get_value(\"LIBROOT\")\n incroot = case.get_value(\"INCROOT\")\n rundir = case.get_value(\"RUNDIR\")\n caseroot = case.get_value(\"CASEROOT\")\n casebuild = case.get_value(\"CASEBUILD\")\n testcase = case.get_value(\"TESTCASE\")\n\n logger.debug(\"LID is: '%s'\" % os.getenv(\"LID\", \"\"))\n logger.debug(\"caseroot is: '%s'\" % caseroot)\n\n dryrun = True if (testcase == \"SBN\") else dryrun\n\n models = [\"atm\", \"lnd\", \"ice\", \"ocn\", \"glc\", \"wav\", \"rof\", \"cpl\"]\n docdir = os.path.join(caseroot, \"CaseDocs\")\n\n if (dryrun):\n # Only create rundir\n try:\n os.makedirs(rundir)\n except OSError:\n logger.warning(\"Not able to create $RUNDIR, trying a subdirectory of $CASEROOT\")\n rundir = os.path.join(caseroot, rundir)\n try:\n 
os.makedirs(rundir)\n logger.info(\"Success! Setting RUNDIR=%s\" % rundir)\n case.set_value(\"RUNDIR\", rundir)\n except OSError:\n expect(False, \"Could not create rundir\")\n\n else:\n\n # Load modules\n env_module = case._get_env(\"mach_specific\")\n env_module.load_env_for_case(compiler=case.get_value(\"COMPILER\"),\n debug=case.get_value(\"DEBUG\"),\n mpilib=case.get_value(\"MPILIB\"))\n\n # Make necessary directories\n dirs_to_make = [os.path.join(exeroot, model, \"obj\") for model in models]\n dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])\n\n for dir_to_make in dirs_to_make:\n if (not os.path.isdir(dir_to_make)):\n try:\n logger.debug(\"Making dir '%s'\" % dir_to_make)\n os.makedirs(dir_to_make)\n except OSError as e:\n expect(False, \"Could not make directory '%s', error: %s\" % (dir_to_make, e))\n\n # Create namelists\n for model in models:\n model_str = \"drv\" if model == \"cpl\" else model\n config_file = case.get_value(\"CONFIG_%s_FILE\" % model_str.upper())\n config_dir = os.path.dirname(config_file)\n cmd = os.path.join(config_dir, \"buildnml\")\n logger.info(\"Running %s\"%cmd)\n if (logger.level == logging.DEBUG):\n run_cmd(\"PREVIEW_NML=1 %s %s\" % (cmd, caseroot))\n else:\n run_cmd(\"%s %s\" % (cmd, caseroot))\n # refresh case xml object from file\n case.read_xml(caseroot)\n # Save namelists to docdir\n if (not os.path.isdir(docdir)):\n os.makedirs(docdir)\n try:\n with open(os.path.join(docdir, \"README\"), \"w\") as fd:\n fd.write(\" CESM Resolved Namelist Files\\n For documentation only DO NOT MODIFY\\n\")\n except (OSError, IOError) as e:\n expect(False, \"Failed to write %s/README: %s\" % (docdir, e))\n\n\n for cpglob in [\"*_in_[0-9]*\", \"*modelio*\", \"*_in\",\n \"*streams*txt*\", \"*stxt\", \"*maps.rc\", \"*cism.config*\"]:\n for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):\n logger.debug(\"Copy file from '%s' to '%s'\" % (file_to_copy, docdir))\n shutil.copy2(file_to_copy, docdir)\n\n # Copy over chemistry mechanism docs if they exist\n if (os.path.isdir(os.path.join(casebuild, \"camconf\"))):\n for file_to_copy in glob.glob(os.path.join(casebuild, \"camconf\", \"*chem_mech*\")):\n shutil.copy2(file_to_copy, docdir)\n", "path": "utils/python/CIME/preview_namelists.py"}]} | 2,031 | 438 |
gh_patches_debug_14453 | rasdani/github-patches | git_diff | interlegis__sapl-3226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Erro encontrado quando a audiência não possui matéria ligada.
<!--- Forneça um resumo geral da _issue_ no título acima -->
Erro encontrado quando a audiência não possui matéria ligada. Derivado do ticket [273270](https://suporte.interlegis.leg.br/scp/tickets.php?id=37122)
## Contexto
<!--- Como esse problema o afeta? O que você está tentando realizar? -->
<!--- Fornecer o contexto nos ajuda a encontrar uma solução que seja mais útil no mundo real -->
## Imagens do Ocorrido
<!--- Representação visual em vídeo ou imagem do ocorrido -->
<!--- Se está descrevendo um bug poste imagens ou vídeos na reprodução do bug citado, caso se aplique -->
## Seu Ambiente
<!--- Inclua detalhes relevantes sobre o ambiente em que você presenciou/experienciou o bug. -->
* Versão usada (_Release_):
* Nome e versão do navegador:
* Nome e versão do Sistema Operacional (desktop ou mobile):
* Link para o seu projeto (Caso de fork deste projeto):
</issue>
<code>
[start of sapl/audiencia/views.py]
1 import sapl
2
3 from django.http import HttpResponse
4 from django.core.urlresolvers import reverse
5 from django.views.decorators.clickjacking import xframe_options_exempt
6 from django.views.generic import UpdateView
7 from sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud
8
9 from .forms import AudienciaForm, AnexoAudienciaPublicaForm
10 from .models import AudienciaPublica, AnexoAudienciaPublica
11
12
13 def index(request):
14 return HttpResponse("Audiência Pública")
15
16
17 class AudienciaCrud(Crud):
18 model = AudienciaPublica
19 public = [RP_LIST, RP_DETAIL, ]
20
21 class BaseMixin(Crud.BaseMixin):
22 list_field_names = ['numero', 'nome', 'tipo', 'materia',
23 'data']
24 ordering = '-data', 'nome', 'numero', 'tipo'
25
26 class ListView(Crud.ListView):
27 paginate_by = 10
28
29 def get_context_data(self, **kwargs):
30 context = super().get_context_data(**kwargs)
31
32 audiencia_materia = {}
33 for o in context['object_list']:
34 # indexado pelo numero da audiencia
35 audiencia_materia[str(o.numero)] = o.materia
36
37 for row in context['rows']:
38 coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui
39 if coluna_materia[0]:
40 materia = audiencia_materia[row[0][0]]
41 url_materia = reverse('sapl.materia:materialegislativa_detail',
42 kwargs={'pk': materia.id})
43 row[3] = (coluna_materia[0], url_materia)
44 return context
45
46 class CreateView(Crud.CreateView):
47 form_class = AudienciaForm
48
49 def form_valid(self, form):
50 return super(Crud.CreateView, self).form_valid(form)
51
52 class UpdateView(Crud.UpdateView):
53 form_class = AudienciaForm
54
55 def get_initial(self):
56 initial = super(UpdateView, self).get_initial()
57 if self.object.materia:
58 initial['tipo_materia'] = self.object.materia.tipo.id
59 initial['numero_materia'] = self.object.materia.numero
60 initial['ano_materia'] = self.object.materia.ano
61 return initial
62
63 class DeleteView(Crud.DeleteView):
64 pass
65
66 class DetailView(Crud.DetailView):
67
68 layout_key = 'AudienciaPublicaDetail'
69
70 @xframe_options_exempt
71 def get(self, request, *args, **kwargs):
72 return super().get(request, *args, **kwargs)
73
74
75 class AudienciaPublicaMixin:
76
77 def has_permission(self):
78 app_config = sapl.base.models.AppConfig.objects.last()
79 if app_config and app_config.documentos_administrativos == 'O':
80 return True
81
82 return super().has_permission()
83
84
85 class AnexoAudienciaPublicaCrud(MasterDetailCrud):
86 model = AnexoAudienciaPublica
87 parent_field = 'audiencia'
88 help_topic = 'numeracao_docsacess'
89 public = [RP_LIST, RP_DETAIL, ]
90
91 class BaseMixin(MasterDetailCrud.BaseMixin):
92 list_field_names = ['assunto']
93
94 class CreateView(MasterDetailCrud.CreateView):
95 form_class = AnexoAudienciaPublicaForm
96 layout_key = None
97
98 class UpdateView(MasterDetailCrud.UpdateView):
99 form_class = AnexoAudienciaPublicaForm
100
101 class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):
102
103 def get_queryset(self):
104 qs = super(MasterDetailCrud.ListView, self).get_queryset()
105 kwargs = {self.crud.parent_field: self.kwargs['pk']}
106 return qs.filter(**kwargs).order_by('-data', '-id')
107
108 class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):
109 pass
110
[end of sapl/audiencia/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py
--- a/sapl/audiencia/views.py
+++ b/sapl/audiencia/views.py
@@ -38,8 +38,11 @@
coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui
if coluna_materia[0]:
materia = audiencia_materia[row[0][0]]
- url_materia = reverse('sapl.materia:materialegislativa_detail',
- kwargs={'pk': materia.id})
+ if materia:
+ url_materia = reverse('sapl.materia:materialegislativa_detail',
+ kwargs={'pk': materia.id})
+ else:
+ url_materia = None
row[3] = (coluna_materia[0], url_materia)
return context
| {"golden_diff": "diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py\n--- a/sapl/audiencia/views.py\n+++ b/sapl/audiencia/views.py\n@@ -38,8 +38,11 @@\n coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui\n if coluna_materia[0]:\n materia = audiencia_materia[row[0][0]]\n- url_materia = reverse('sapl.materia:materialegislativa_detail',\n- kwargs={'pk': materia.id})\n+ if materia:\n+ url_materia = reverse('sapl.materia:materialegislativa_detail',\n+ kwargs={'pk': materia.id})\n+ else:\n+ url_materia = None\n row[3] = (coluna_materia[0], url_materia)\n return context\n", "issue": "Erro encontrado quando a audi\u00eancia n\u00e3o possui mat\u00e9ria ligada.\n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\nErro encontrado quando a audi\u00eancia n\u00e3o possui mat\u00e9ria ligada. Derivado do ticket [273270](https://suporte.interlegis.leg.br/scp/tickets.php?id=37122)\r\n\r\n\r\n\r\n## Contexto\r\n<!--- Como esse problema o afeta? O que voc\u00ea est\u00e1 tentando realizar? -->\r\n<!--- Fornecer o contexto nos ajuda a encontrar uma solu\u00e7\u00e3o que seja mais \u00fatil no mundo real -->\r\n\r\n## Imagens do Ocorrido\r\n<!--- Representa\u00e7\u00e3o visual em v\u00eddeo ou imagem do ocorrido -->\r\n<!--- Se est\u00e1 descrevendo um bug poste imagens ou v\u00eddeos na reprodu\u00e7\u00e3o do bug citado, caso se aplique -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. -->\r\n* Vers\u00e3o usada (_Release_):\r\n* Nome e vers\u00e3o do navegador:\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile):\r\n* Link para o seu projeto (Caso de fork deste projeto):\r\n\n", "before_files": [{"content": "import sapl\n\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud\n\nfrom .forms import AudienciaForm, AnexoAudienciaPublicaForm\nfrom .models import AudienciaPublica, AnexoAudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data'] \n ordering = '-data', 'nome', 'numero', 'tipo'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n audiencia_materia = {}\n for o in context['object_list']:\n # indexado pelo numero da audiencia\n audiencia_materia[str(o.numero)] = o.materia\n\n for row in context['rows']:\n coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui\n if coluna_materia[0]:\n materia = audiencia_materia[row[0][0]]\n url_materia = reverse('sapl.materia:materialegislativa_detail',\n kwargs={'pk': materia.id})\n row[3] = (coluna_materia[0], url_materia)\n return context\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n if self.object.materia:\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n 
initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n\nclass AudienciaPublicaMixin:\n\n def has_permission(self):\n app_config = sapl.base.models.AppConfig.objects.last()\n if app_config and app_config.documentos_administrativos == 'O':\n return True\n\n return super().has_permission()\n\n\nclass AnexoAudienciaPublicaCrud(MasterDetailCrud):\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = AnexoAudienciaPublicaForm\n layout_key = None\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = AnexoAudienciaPublicaForm\n\n class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):\n\n def get_queryset(self):\n qs = super(MasterDetailCrud.ListView, self).get_queryset()\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n\n class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):\n pass\n", "path": "sapl/audiencia/views.py"}]} | 1,867 | 197 |
gh_patches_debug_62155 | rasdani/github-patches | git_diff | Parsl__parsl-597 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make `GlobusScheme` inherit from `RepresentationMixin`
Otherwise, the config printed in the log is not copy-and-pasteable:
```
storage_access=[<parsl.data_provider.scheme.GlobusScheme object at 0x7f48d021fbe0>],
working_dir=None
```
Make `GlobusScheme` inherit from `RepresentationMixin`
Otherwise, the config printed in the log is not copy-and-pasteable:
```
storage_access=[<parsl.data_provider.scheme.GlobusScheme object at 0x7f48d021fbe0>],
working_dir=None
```
</issue>
<code>
[start of parsl/data_provider/scheme.py]
1
2 class GlobusScheme(object):
3 """Specification for accessing data on a remote executor via Globus.
4
5 Parameters
6 ----------
7 endpoint_uuid : str
8 Universally unique identifier of the Globus endpoint at which the data can be accessed.
9 This can be found in the `Manage Endpoints <https://www.globus.org/app/endpoints>`_ page.
10 endpoint_path : str, optional
11 FIXME
12 local_path : str, optional
13 FIXME
14 """
15 def __init__(self, endpoint_uuid, endpoint_path=None, local_path=None):
16 self.endpoint_uuid = endpoint_uuid
17 self.endpoint_path = endpoint_path
18 self.local_path = local_path
19
[end of parsl/data_provider/scheme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/data_provider/scheme.py b/parsl/data_provider/scheme.py
--- a/parsl/data_provider/scheme.py
+++ b/parsl/data_provider/scheme.py
@@ -1,5 +1,7 @@
+from parsl.utils import RepresentationMixin
-class GlobusScheme(object):
+
+class GlobusScheme(RepresentationMixin):
"""Specification for accessing data on a remote executor via Globus.
Parameters
| {"golden_diff": "diff --git a/parsl/data_provider/scheme.py b/parsl/data_provider/scheme.py\n--- a/parsl/data_provider/scheme.py\n+++ b/parsl/data_provider/scheme.py\n@@ -1,5 +1,7 @@\n+from parsl.utils import RepresentationMixin\n \n-class GlobusScheme(object):\n+\n+class GlobusScheme(RepresentationMixin):\n \"\"\"Specification for accessing data on a remote executor via Globus.\n \n Parameters\n", "issue": "Make `GlobusScheme` inherit from `RepresentationMixin`\nOtherwise, the config printed in the log is not copy-and-pasteable:\r\n\r\n```\r\n storage_access=[<parsl.data_provider.scheme.GlobusScheme object at 0x7f48d021fbe0>], \r\n working_dir=None\r\n```\r\n\r\n\nMake `GlobusScheme` inherit from `RepresentationMixin`\nOtherwise, the config printed in the log is not copy-and-pasteable:\r\n\r\n```\r\n storage_access=[<parsl.data_provider.scheme.GlobusScheme object at 0x7f48d021fbe0>], \r\n working_dir=None\r\n```\r\n\r\n\n", "before_files": [{"content": "\nclass GlobusScheme(object):\n \"\"\"Specification for accessing data on a remote executor via Globus.\n\n Parameters\n ----------\n endpoint_uuid : str\n Universally unique identifier of the Globus endpoint at which the data can be accessed.\n This can be found in the `Manage Endpoints <https://www.globus.org/app/endpoints>`_ page.\n endpoint_path : str, optional\n FIXME\n local_path : str, optional\n FIXME\n \"\"\"\n def __init__(self, endpoint_uuid, endpoint_path=None, local_path=None):\n self.endpoint_uuid = endpoint_uuid\n self.endpoint_path = endpoint_path\n self.local_path = local_path\n", "path": "parsl/data_provider/scheme.py"}]} | 854 | 98 |
gh_patches_debug_5521 | rasdani/github-patches | git_diff | aws__aws-cli-2510 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version 1.11.17 and above breaks EC2 describe volumes with JSON input
Related Issue: #1976
JSON File:
```json
{
"VolumeIds": [
"vol-<id>"
]
}
```
Command:
```
aws ec2 describe-volumes --region <region> --cli-input-json file://<file-name>.json
```
Error Message:
```
An error occurred (InvalidParameterCombination) when calling the DescribeVolumes operation: The parameter volumeSet cannot be used with the parameter maxResults
```
OS Release:
```
NAME="Amazon Linux AMI"
VERSION="2016.09"
ID="amzn"
ID_LIKE="rhel fedora"
VERSION_ID="2016.09"
PRETTY_NAME="Amazon Linux AMI 2016.09"
ANSI_COLOR="0;33"
CPE_NAME="cpe:/o:amazon:linux:2016.09:ga"
HOME_URL="http://aws.amazon.com/amazon-linux-ami/"
```
Works:
```
python27-botocore-1.4.46-1.58.amzn1.noarch
aws-cli-1.10.56-1.41.amzn1.noarch
```
Breaks:
```
python27-botocore-1.4.74-1.60.amzn1.noarch
aws-cli-1.11.17-1.43.amzn1.noarch
```
```
python27-botocore-1.4.86-1.62.amzn1.noarch
aws-cli-1.11.29-1.45.amzn1.noarch
```
</issue>
<code>
[start of awscli/customizations/cliinputjson.py]
1 # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import json
14
15 from awscli.paramfile import get_paramfile
16 from awscli.argprocess import ParamError
17 from awscli.customizations.arguments import OverrideRequiredArgsArgument
18
19
20 def register_cli_input_json(cli):
21 cli.register('building-argument-table', add_cli_input_json)
22
23
24 def add_cli_input_json(session, argument_table, **kwargs):
25 # This argument cannot support operations with streaming output which
26 # is designated by the argument name `outfile`.
27 if 'outfile' not in argument_table:
28 cli_input_json_argument = CliInputJSONArgument(session)
29 cli_input_json_argument.add_to_arg_table(argument_table)
30
31
32 class CliInputJSONArgument(OverrideRequiredArgsArgument):
33 """This argument inputs a JSON string as the entire input for a command.
34
35 Ideally, the value to this argument should be a filled out JSON file
36 generated by ``--generate-cli-skeleton``. The items in the JSON string
37 will not clobber other arguments entered into the command line.
38 """
39 ARG_DATA = {
40 'name': 'cli-input-json',
41 'help_text': 'Performs service operation based on the JSON string '
42 'provided. The JSON string follows the format provided '
43 'by ``--generate-cli-skeleton``. If other arguments are '
44 'provided on the command line, the CLI values will override '
45 'the JSON-provided values.'
46 }
47
48 def __init__(self, session):
49 super(CliInputJSONArgument, self).__init__(session)
50
51 def _register_argument_action(self):
52 self._session.register(
53 'calling-command', self.add_to_call_parameters)
54 super(CliInputJSONArgument, self)._register_argument_action()
55
56 def add_to_call_parameters(self, call_parameters, parsed_args,
57 parsed_globals, **kwargs):
58
59 # Check if ``--cli-input-json`` was specified in the command line.
60 input_json = getattr(parsed_args, 'cli_input_json', None)
61 if input_json is not None:
62 # Retrieve the JSON from the file if needed.
63 retrieved_json = get_paramfile(input_json)
64 # Nothing was retrieved from the file. So assume the argument
65 # is already a JSON string.
66 if retrieved_json is None:
67 retrieved_json = input_json
68 try:
69 # Try to load the JSON string into a python dictionary
70 input_data = json.loads(retrieved_json)
71 except ValueError as e:
72 raise ParamError(
73 self.name, "Invalid JSON: %s\nJSON received: %s"
74 % (e, retrieved_json))
75 # Add the members from the input JSON to the call parameters.
76 self._update_call_parameters(call_parameters, input_data)
77
78 def _update_call_parameters(self, call_parameters, input_data):
79 for input_key in input_data.keys():
80 # Only add the values to ``call_parameters`` if not already
81 # present.
82 if input_key not in call_parameters:
83 call_parameters[input_key] = input_data[input_key]
84
[end of awscli/customizations/cliinputjson.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/cliinputjson.py b/awscli/customizations/cliinputjson.py
--- a/awscli/customizations/cliinputjson.py
+++ b/awscli/customizations/cliinputjson.py
@@ -50,7 +50,7 @@
def _register_argument_action(self):
self._session.register(
- 'calling-command', self.add_to_call_parameters)
+ 'calling-command.*', self.add_to_call_parameters)
super(CliInputJSONArgument, self)._register_argument_action()
def add_to_call_parameters(self, call_parameters, parsed_args,
| {"golden_diff": "diff --git a/awscli/customizations/cliinputjson.py b/awscli/customizations/cliinputjson.py\n--- a/awscli/customizations/cliinputjson.py\n+++ b/awscli/customizations/cliinputjson.py\n@@ -50,7 +50,7 @@\n \n def _register_argument_action(self):\n self._session.register(\n- 'calling-command', self.add_to_call_parameters)\n+ 'calling-command.*', self.add_to_call_parameters)\n super(CliInputJSONArgument, self)._register_argument_action()\n \n def add_to_call_parameters(self, call_parameters, parsed_args,\n", "issue": "Version 1.11.17 and above breaks EC2 describe volumes with JSON input\nRelated Issue: #1976 \r\n\r\nJSON File:\r\n```json\r\n{\r\n \"VolumeIds\": [\r\n \"vol-<id>\"\r\n ]\r\n}\r\n```\r\n\r\nCommand:\r\n```\r\naws ec2 describe-volumes --region <region> --cli-input-json file://<file-name>.json\r\n```\r\n\r\nError Message:\r\n```\r\nAn error occurred (InvalidParameterCombination) when calling the DescribeVolumes operation: The parameter volumeSet cannot be used with the parameter maxResults\r\n```\r\n\r\nOS Release:\r\n```\r\nNAME=\"Amazon Linux AMI\"\r\nVERSION=\"2016.09\"\r\nID=\"amzn\"\r\nID_LIKE=\"rhel fedora\"\r\nVERSION_ID=\"2016.09\"\r\nPRETTY_NAME=\"Amazon Linux AMI 2016.09\"\r\nANSI_COLOR=\"0;33\"\r\nCPE_NAME=\"cpe:/o:amazon:linux:2016.09:ga\"\r\nHOME_URL=\"http://aws.amazon.com/amazon-linux-ami/\"\r\n```\r\n\r\nWorks:\r\n```\r\npython27-botocore-1.4.46-1.58.amzn1.noarch\r\naws-cli-1.10.56-1.41.amzn1.noarch\r\n```\r\n\r\nBreaks:\r\n```\r\npython27-botocore-1.4.74-1.60.amzn1.noarch\r\naws-cli-1.11.17-1.43.amzn1.noarch\r\n```\r\n```\r\npython27-botocore-1.4.86-1.62.amzn1.noarch\r\naws-cli-1.11.29-1.45.amzn1.noarch\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport json\n\nfrom awscli.paramfile import get_paramfile\nfrom awscli.argprocess import ParamError\nfrom awscli.customizations.arguments import OverrideRequiredArgsArgument\n\n\ndef register_cli_input_json(cli):\n cli.register('building-argument-table', add_cli_input_json)\n\n\ndef add_cli_input_json(session, argument_table, **kwargs):\n # This argument cannot support operations with streaming output which\n # is designated by the argument name `outfile`.\n if 'outfile' not in argument_table:\n cli_input_json_argument = CliInputJSONArgument(session)\n cli_input_json_argument.add_to_arg_table(argument_table)\n\n\nclass CliInputJSONArgument(OverrideRequiredArgsArgument):\n \"\"\"This argument inputs a JSON string as the entire input for a command.\n\n Ideally, the value to this argument should be a filled out JSON file\n generated by ``--generate-cli-skeleton``. The items in the JSON string\n will not clobber other arguments entered into the command line.\n \"\"\"\n ARG_DATA = {\n 'name': 'cli-input-json',\n 'help_text': 'Performs service operation based on the JSON string '\n 'provided. The JSON string follows the format provided '\n 'by ``--generate-cli-skeleton``. 
If other arguments are '\n 'provided on the command line, the CLI values will override '\n 'the JSON-provided values.'\n }\n\n def __init__(self, session):\n super(CliInputJSONArgument, self).__init__(session)\n\n def _register_argument_action(self):\n self._session.register(\n 'calling-command', self.add_to_call_parameters)\n super(CliInputJSONArgument, self)._register_argument_action()\n\n def add_to_call_parameters(self, call_parameters, parsed_args,\n parsed_globals, **kwargs):\n\n # Check if ``--cli-input-json`` was specified in the command line.\n input_json = getattr(parsed_args, 'cli_input_json', None)\n if input_json is not None:\n # Retrieve the JSON from the file if needed.\n retrieved_json = get_paramfile(input_json)\n # Nothing was retrieved from the file. So assume the argument\n # is already a JSON string.\n if retrieved_json is None:\n retrieved_json = input_json\n try:\n # Try to load the JSON string into a python dictionary\n input_data = json.loads(retrieved_json)\n except ValueError as e:\n raise ParamError(\n self.name, \"Invalid JSON: %s\\nJSON received: %s\"\n % (e, retrieved_json))\n # Add the members from the input JSON to the call parameters.\n self._update_call_parameters(call_parameters, input_data)\n\n def _update_call_parameters(self, call_parameters, input_data):\n for input_key in input_data.keys():\n # Only add the values to ``call_parameters`` if not already\n # present.\n if input_key not in call_parameters:\n call_parameters[input_key] = input_data[input_key]\n", "path": "awscli/customizations/cliinputjson.py"}]} | 1,849 | 126 |
gh_patches_debug_62141 | rasdani/github-patches | git_diff | searx__searx-1277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
findx crashes
... with message on web page: findx (unexpected crash: string indices must be integers)
</issue>
<code>
[start of searx/engines/findx.py]
1 """
2 FindX (General, Images, Videos)
3
4 @website https://www.findx.com
5 @provide-api no
6 @using-api no
7 @results HTML
8 @stable no
9 @parse url, title, content, embedded, img_src, thumbnail_src
10 """
11
12 from dateutil import parser
13 from json import loads
14 import re
15
16 from lxml import html
17
18 from searx import logger
19 from searx.engines.xpath import extract_text
20 from searx.engines.youtube_noapi import base_youtube_url, embedded_url
21 from searx.url_utils import urlencode
22
23
24 paging = True
25 results_xpath = '//script[@id="initial-state"]'
26 search_url = 'https://www.findx.com/{category}?{q}'
27 type_map = {
28 'none': 'web',
29 'general': 'web',
30 'images': 'images',
31 'videos': 'videos',
32 }
33
34
35 def request(query, params):
36 params['url'] = search_url.format(
37 category=type_map[params['category']],
38 q=urlencode({
39 'q': query,
40 'page': params['pageno']
41 })
42 )
43 return params
44
45
46 def response(resp):
47 dom = html.fromstring(resp.text)
48 results_raw_json = dom.xpath(results_xpath)
49 results_json = loads(extract_text(results_raw_json))
50
51 if len(results_json['web']['results']) > 0:
52 return _general_results(results_json['web']['results'])
53
54 if len(results_json['images']['results']) > 0:
55 return _images_results(results_json['images']['results'])
56
57 if len(results_json['video']['results']) > 0:
58 return _videos_results(results_json['video']['results'])
59
60 return []
61
62
63 def _general_results(general_results):
64 results = []
65 for result in general_results:
66 results.append({
67 'url': result['url'],
68 'title': result['title'],
69 'content': result['sum'],
70 })
71 return results
72
73
74 def _images_results(image_results):
75 results = []
76 for result in image_results:
77 results.append({
78 'url': result['sourceURL'],
79 'title': result['title'],
80 'content': result['source'],
81 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),
82 'img_src': _extract_url(result['assets']['file']['url']),
83 'template': 'images.html',
84 })
85 return results
86
87
88 def _videos_results(video_results):
89 results = []
90 for result in video_results:
91 if not result['kind'].startswith('youtube'):
92 logger.warn('Unknown video kind in findx: {}'.format(result['kind']))
93 continue
94
95 description = result['snippet']['description']
96 if len(description) > 300:
97 description = description[:300] + '...'
98
99 results.append({
100 'url': base_youtube_url + result['id'],
101 'title': result['snippet']['title'],
102 'content': description,
103 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
104 'publishedDate': parser.parse(result['snippet']['publishedAt']),
105 'embedded': embedded_url.format(videoid=result['id']),
106 'template': 'videos.html',
107 })
108 return results
109
110
111 def _extract_url(url):
112 matching = re.search('(/https?://[^)]+)', url)
113 if matching:
114 return matching.group(0)[1:]
115 return ''
116
[end of searx/engines/findx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/findx.py b/searx/engines/findx.py
--- a/searx/engines/findx.py
+++ b/searx/engines/findx.py
@@ -49,7 +49,7 @@
results_json = loads(extract_text(results_raw_json))
if len(results_json['web']['results']) > 0:
- return _general_results(results_json['web']['results'])
+ return _general_results(results_json['web']['results']['webSearch']['results'])
if len(results_json['images']['results']) > 0:
return _images_results(results_json['images']['results'])
| {"golden_diff": "diff --git a/searx/engines/findx.py b/searx/engines/findx.py\n--- a/searx/engines/findx.py\n+++ b/searx/engines/findx.py\n@@ -49,7 +49,7 @@\n results_json = loads(extract_text(results_raw_json))\n \n if len(results_json['web']['results']) > 0:\n- return _general_results(results_json['web']['results'])\n+ return _general_results(results_json['web']['results']['webSearch']['results'])\n \n if len(results_json['images']['results']) > 0:\n return _images_results(results_json['images']['results'])\n", "issue": "findx crashes\n... with message on web page: findx (unexpected crash: string indices must be integers) \n", "before_files": [{"content": "\"\"\"\nFindX (General, Images, Videos)\n\n@website https://www.findx.com\n@provide-api no\n@using-api no\n@results HTML\n@stable no\n@parse url, title, content, embedded, img_src, thumbnail_src\n\"\"\"\n\nfrom dateutil import parser\nfrom json import loads\nimport re\n\nfrom lxml import html\n\nfrom searx import logger\nfrom searx.engines.xpath import extract_text\nfrom searx.engines.youtube_noapi import base_youtube_url, embedded_url\nfrom searx.url_utils import urlencode\n\n\npaging = True\nresults_xpath = '//script[@id=\"initial-state\"]'\nsearch_url = 'https://www.findx.com/{category}?{q}'\ntype_map = {\n 'none': 'web',\n 'general': 'web',\n 'images': 'images',\n 'videos': 'videos',\n}\n\n\ndef request(query, params):\n params['url'] = search_url.format(\n category=type_map[params['category']],\n q=urlencode({\n 'q': query,\n 'page': params['pageno']\n })\n )\n return params\n\n\ndef response(resp):\n dom = html.fromstring(resp.text)\n results_raw_json = dom.xpath(results_xpath)\n results_json = loads(extract_text(results_raw_json))\n\n if len(results_json['web']['results']) > 0:\n return _general_results(results_json['web']['results'])\n\n if len(results_json['images']['results']) > 0:\n return _images_results(results_json['images']['results'])\n\n if len(results_json['video']['results']) > 0:\n return _videos_results(results_json['video']['results'])\n\n return []\n\n\ndef _general_results(general_results):\n results = []\n for result in general_results:\n results.append({\n 'url': result['url'],\n 'title': result['title'],\n 'content': result['sum'],\n })\n return results\n\n\ndef _images_results(image_results):\n results = []\n for result in image_results:\n results.append({\n 'url': result['sourceURL'],\n 'title': result['title'],\n 'content': result['source'],\n 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),\n 'img_src': _extract_url(result['assets']['file']['url']),\n 'template': 'images.html',\n })\n return results\n\n\ndef _videos_results(video_results):\n results = []\n for result in video_results:\n if not result['kind'].startswith('youtube'):\n logger.warn('Unknown video kind in findx: {}'.format(result['kind']))\n continue\n\n description = result['snippet']['description']\n if len(description) > 300:\n description = description[:300] + '...'\n\n results.append({\n 'url': base_youtube_url + result['id'],\n 'title': result['snippet']['title'],\n 'content': description,\n 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),\n 'publishedDate': parser.parse(result['snippet']['publishedAt']),\n 'embedded': embedded_url.format(videoid=result['id']),\n 'template': 'videos.html',\n })\n return results\n\n\ndef _extract_url(url):\n matching = re.search('(/https?://[^)]+)', url)\n if matching:\n return matching.group(0)[1:]\n return ''\n", "path": "searx/engines/findx.py"}]} | 1,547 
| 146 |
gh_patches_debug_21871 | rasdani/github-patches | git_diff | kubeflow__pipelines-4611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SDK - Drop support for Python 3.5
Python 3.5 has reached EoL: https://www.python.org/downloads/release/python-3510/#:~:text=Python%203.5%20will%20reach%20its,release%20of%20the%203.5%20series.
We're going to stop supporting Python 3.5 soon.
Please feel free to comment or vote on this issue.
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17 from setuptools import setup
18
19 NAME = 'kfp'
20 #VERSION = .... Change the version in kfp/__init__.py
21
22 # NOTICE, after any updates to the following, ./requirements.in should be updated
23 # accordingly.
24 REQUIRES = [
25 'absl-py>=0.9,<=0.11',
26 'PyYAML>=5.3,<6',
27 'google-cloud-storage>=1.13.0,<2',
28 'kubernetes>=8.0.0,<12.0.0',
29 'google-auth>=1.6.1,<2',
30 'requests-toolbelt>=0.8.0,<1',
31 'cloudpickle>=1.3.0,<2',
32 # Update the upper version whenever a new major version of the
33 # kfp-server-api package is released.
34 # Update the lower version when kfp sdk depends on new apis/fields in
35 # kfp-server-api.
36 # Note, please also update ./requirements.in
37 'kfp-server-api>=1.1.2,<2.0.0',
38 'jsonschema>=3.0.1,<4',
39 'tabulate>=0.8.6,<1',
40 'click>=7.1.1,<8',
41 'Deprecated>=1.2.7,<2',
42 'strip-hints>=0.1.8,<1',
43 'docstring-parser>=0.7.3,<1',
44 'kfp-pipeline-spec>=0.1.5,<0.2.0',
45 'fire>=0.3.1,<1',
46 'protobuf>=3.13.0,<4'
47 ]
48
49 TESTS_REQUIRE = [
50 'mock',
51 ]
52
53
54 def find_version(*file_path_parts):
55 here = os.path.abspath(os.path.dirname(__file__))
56 with open(os.path.join(here, *file_path_parts), 'r') as fp:
57 version_file_text = fp.read()
58
59 version_match = re.search(
60 r"^__version__ = ['\"]([^'\"]*)['\"]",
61 version_file_text,
62 re.M,
63 )
64 if version_match:
65 return version_match.group(1)
66
67 raise RuntimeError('Unable to find version string.')
68
69
70 setup(
71 name=NAME,
72 version=find_version('kfp', '__init__.py'),
73 description='KubeFlow Pipelines SDK',
74 author='google',
75 install_requires=REQUIRES,
76 tests_require=TESTS_REQUIRE,
77 packages=[
78 'kfp',
79 'kfp.cli',
80 'kfp.cli.diagnose_me',
81 'kfp.compiler',
82 'kfp.components',
83 'kfp.components.structures',
84 'kfp.containers',
85 'kfp.dsl',
86 'kfp.dsl.extensions',
87 'kfp.notebook',
88 'kfp.v2',
89 'kfp.v2.compiler',
90 'kfp.v2.components',
91 'kfp.v2.dsl',
92 ],
93 classifiers=[
94 'Intended Audience :: Developers',
95 'Intended Audience :: Education',
96 'Intended Audience :: Science/Research',
97 'License :: OSI Approved :: Apache Software License',
98 'Programming Language :: Python :: 3',
99 'Programming Language :: Python :: 3.5',
100 'Programming Language :: Python :: 3.6',
101 'Programming Language :: Python :: 3.7',
102 'Topic :: Scientific/Engineering',
103 'Topic :: Scientific/Engineering :: Artificial Intelligence',
104 'Topic :: Software Development',
105 'Topic :: Software Development :: Libraries',
106 'Topic :: Software Development :: Libraries :: Python Modules',
107 ],
108 python_requires='>=3.5.3',
109 include_package_data=True,
110 entry_points={
111 'console_scripts': [
112 'dsl-compile = kfp.compiler.main:main',
113 'dsl-compile-v2 = kfp.v2.compiler.main:main',
114 'kfp=kfp.__main__:main'
115 ]
116 }
117 )
118
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -96,16 +96,16 @@
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
- python_requires='>=3.5.3',
+ python_requires='>=3.6.1',
include_package_data=True,
entry_points={
'console_scripts': [
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -96,16 +96,16 @@\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n+ 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n- python_requires='>=3.5.3',\n+ python_requires='>=3.6.1',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n", "issue": "SDK - Drop support for Python 3.5\nPython 3.5 has reached EoL: https://www.python.org/downloads/release/python-3510/#:~:text=Python%203.5%20will%20reach%20its,release%20of%20the%203.5%20series.\r\n\r\nWe're going to stop supporting Python 3.5 soon.\r\n\r\nPlease feel free to comment or vote on this issue.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n 'google-cloud-storage>=1.13.0,<2',\n 'kubernetes>=8.0.0,<12.0.0',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.1,<8',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.5,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4'\n]\n\nTESTS_REQUIRE = [\n 'mock',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.dsl',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n }\n)\n", "path": "sdk/python/setup.py"}]} | 1,863 | 213 |
gh_patches_debug_2002 | rasdani/github-patches | git_diff | uclapi__uclapi-4023 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] Add /authorize Oauth route
**Is your feature request related to a problem? Please describe.**
I have been attempting to use 'auth0-react' to implement OAuth with the UCL API; however, this requires a fair bit of tinkering, as
the defaults of this and many other auth libraries are to redirect to a "/authorize?client_id..." endpoint, which the UCL API does not support.
While this can be avoided through customisation, would it be possible to add a "/authorize" route? I believe this could make it easier to use some of the "plug and play" auth libraries, which typically expect the American spelling.
**Describe the solution you'd like**
Edit uclapi/backend/uclapi/oauth/urls.py as below
```
urlpatterns = [
url(r'authorise/$', views.authorise),
url(r'authorize/$', views.authorise), <===== Including views.authorise for the 'authorize/$' route.
url(r'shibcallback', views.shibcallback),
url(r'token$', views.token),
url(r'tokens/scopes$', views.scope_map),
url(r'tokens/test$', views.token_test),
url(r'user/allow$', views.userallow),
url(r'user/deny$', views.userdeny),
url(r'user/data$', views.userdata),
url(r'user/studentnumber$', views.get_student_number),
url(r'deauthorise$', views.deauthorise_app),
url(r'user/settings$', views.get_settings)
]
```


</issue>
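As an aside, one lightweight way to confirm that both spellings hit the same view is a small Django URL-resolution test. This is only a sketch: the `oauth/` prefix and the test class name are assumptions for illustration, not taken from the repository.

```python
# Hypothetical check: both spellings should resolve to the same view callable.
from django.test import SimpleTestCase
from django.urls import resolve


class AuthoriseAliasTest(SimpleTestCase):
    def test_both_spellings_resolve_to_same_view(self):
        # The 'oauth/' prefix is an assumption about how these urlpatterns are included.
        british = resolve('/oauth/authorise/')
        american = resolve('/oauth/authorize/')
        self.assertEqual(british.func, american.func)
```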
<code>
[start of backend/uclapi/oauth/urls.py]
1 from django.conf.urls import url
2
3 from . import views
4
5 urlpatterns = [
6 url(r'authorise/$', views.authorise),
7 url(r'adcallback', views.adcallback),
8 url(r'token$', views.token),
9 url(r'tokens/scopes$', views.scope_map),
10 url(r'tokens/test$', views.token_test),
11 url(r'user/allow$', views.userallow),
12 url(r'user/deny$', views.userdeny),
13 url(r'user/data$', views.userdata),
14 url(r'user/studentnumber$', views.get_student_number),
15 url(r'deauthorise$', views.deauthorise_app),
16 url(r'user/settings$', views.get_settings)
17 ]
18
[end of backend/uclapi/oauth/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py
--- a/backend/uclapi/oauth/urls.py
+++ b/backend/uclapi/oauth/urls.py
@@ -4,6 +4,7 @@
urlpatterns = [
url(r'authorise/$', views.authorise),
+ url(r'authorize/$', views.authorise),
url(r'adcallback', views.adcallback),
url(r'token$', views.token),
url(r'tokens/scopes$', views.scope_map),
| {"golden_diff": "diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py\n--- a/backend/uclapi/oauth/urls.py\n+++ b/backend/uclapi/oauth/urls.py\n@@ -4,6 +4,7 @@\n \n urlpatterns = [\n url(r'authorise/$', views.authorise),\n+ url(r'authorize/$', views.authorise),\n url(r'adcallback', views.adcallback),\n url(r'token$', views.token),\n url(r'tokens/scopes$', views.scope_map),\n", "issue": "[Feature Request] Add /authorize Oauth route\n**Is your feature request related to a problem? Please describe.**\r\nI have been attempting to use 'auth0-react' to implement Oauth with UCL API, however, this requires a fair bit of tinkering as\r\nthe defaults of this and many other auth libraries are to redirect to a \"/authorize?client_id...\" endpoint which the UCL API does not support. \r\n\r\nWhile this can be avoided through customisation, would it be possible to add a \"/authorize\" route, as I believe this could make it easier to use some of the \"plug and play\" Americanized auth libraries available?\r\n\r\n**Describe the solution you'd like**\r\n\r\n Edit uclapi/backend/uclapi/oauth/urls.py as below \r\n```\r\nurlpatterns = [\r\n url(r'authorise/$', views.authorise),\r\n url(r'authorize/$', views.authorise), <===== Including views.authorise for the 'authorize/$' route.\r\n url(r'shibcallback', views.shibcallback),\r\n url(r'token$', views.token),\r\n url(r'tokens/scopes$', views.scope_map),\r\n url(r'tokens/test$', views.token_test),\r\n url(r'user/allow$', views.userallow),\r\n url(r'user/deny$', views.userdeny),\r\n url(r'user/data$', views.userdata),\r\n url(r'user/studentnumber$', views.get_student_number),\r\n url(r'deauthorise$', views.deauthorise_app),\r\n url(r'user/settings$', views.get_settings)\r\n]\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'authorise/$', views.authorise),\n url(r'adcallback', views.adcallback),\n url(r'token$', views.token),\n url(r'tokens/scopes$', views.scope_map),\n url(r'tokens/test$', views.token_test),\n url(r'user/allow$', views.userallow),\n url(r'user/deny$', views.userdeny),\n url(r'user/data$', views.userdata),\n url(r'user/studentnumber$', views.get_student_number),\n url(r'deauthorise$', views.deauthorise_app),\n url(r'user/settings$', views.get_settings)\n]\n", "path": "backend/uclapi/oauth/urls.py"}]} | 1,156 | 113 |
gh_patches_debug_31634 | rasdani/github-patches | git_diff | Pylons__pyramid-1467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating Integration Tests needs an example plug-in
The documentation at http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html#creating-integration-tests shows test code without the corresponding code under test. It would be good to see what that code would look like.
</issue>
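For context, here is a sketch of what such an example plug-in and its integration test might look like. All names (`hello_view`, the route, the assertion text) are illustrative only and are not proposed documentation wording.

```python
# myapp/__init__.py -- a tiny Pyramid application to test against
from pyramid.config import Configurator
from pyramid.response import Response


def hello_view(request):
    return Response('Hello, world!')


def main(global_config=None, **settings):
    # Build and return the WSGI app the integration test will exercise.
    config = Configurator(settings=settings)
    config.add_route('hello', '/')
    config.add_view(hello_view, route_name='hello')
    return config.make_wsgi_app()


# tests.py -- the integration test running against the real WSGI app
import unittest
from webtest import TestApp


class FunctionalTests(unittest.TestCase):
    def setUp(self):
        self.testapp = TestApp(main({}))

    def test_root(self):
        res = self.testapp.get('/', status=200)
        self.assertIn(b'Hello, world!', res.body)
```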
<code>
[start of docs/narr/MyProject/setup.py]
1 import os
2
3 from setuptools import setup, find_packages
4
5 here = os.path.abspath(os.path.dirname(__file__))
6 with open(os.path.join(here, 'README.txt')) as f:
7 README = f.read()
8 with open(os.path.join(here, 'CHANGES.txt')) as f:
9 CHANGES = f.read()
10
11 requires = [
12 'pyramid',
13 'pyramid_chameleon',
14 'pyramid_debugtoolbar',
15 'waitress',
16 ]
17
18 setup(name='MyProject',
19 version='0.0',
20 description='MyProject',
21 long_description=README + '\n\n' + CHANGES,
22 classifiers=[
23 "Programming Language :: Python",
24 "Framework :: Pyramid",
25 "Topic :: Internet :: WWW/HTTP",
26 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
27 ],
28 author='',
29 author_email='',
30 url='',
31 keywords='web pyramid pylons',
32 packages=find_packages(),
33 include_package_data=True,
34 zip_safe=False,
35 install_requires=requires,
36 tests_require=requires,
37 test_suite="myproject",
38 entry_points="""\
39 [paste.app_factory]
40 main = myproject:main
41 """,
42 )
43
[end of docs/narr/MyProject/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/narr/MyProject/setup.py b/docs/narr/MyProject/setup.py
--- a/docs/narr/MyProject/setup.py
+++ b/docs/narr/MyProject/setup.py
@@ -1,30 +1,42 @@
-import os
+"""Setup for the MyProject package.
+"""
+import os
from setuptools import setup, find_packages
-here = os.path.abspath(os.path.dirname(__file__))
-with open(os.path.join(here, 'README.txt')) as f:
- README = f.read()
-with open(os.path.join(here, 'CHANGES.txt')) as f:
- CHANGES = f.read()
-requires = [
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+with open(os.path.join(HERE, 'README.txt')) as fp:
+ README = fp.read()
+
+
+with open(os.path.join(HERE, 'CHANGES.txt')) as fp:
+ CHANGES = fp.read()
+
+
+REQUIRES = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'waitress',
]
+TESTS_REQUIRE = [
+ 'webtest'
+ ]
+
setup(name='MyProject',
version='0.0',
description='MyProject',
long_description=README + '\n\n' + CHANGES,
classifiers=[
- "Programming Language :: Python",
- "Framework :: Pyramid",
- "Topic :: Internet :: WWW/HTTP",
- "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
- ],
+ 'Programming Language :: Python',
+ 'Framework :: Pyramid',
+ 'Topic :: Internet :: WWW/HTTP',
+ 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
+ ],
author='',
author_email='',
url='',
@@ -32,11 +44,10 @@
packages=find_packages(),
include_package_data=True,
zip_safe=False,
- install_requires=requires,
- tests_require=requires,
- test_suite="myproject",
+ install_requires=REQUIRES,
+ tests_require=TESTS_REQUIRE,
+ test_suite='myproject',
entry_points="""\
[paste.app_factory]
main = myproject:main
- """,
- )
+ """)
| {"golden_diff": "diff --git a/docs/narr/MyProject/setup.py b/docs/narr/MyProject/setup.py\n--- a/docs/narr/MyProject/setup.py\n+++ b/docs/narr/MyProject/setup.py\n@@ -1,30 +1,42 @@\n-import os\n+\"\"\"Setup for the MyProject package.\n \n+\"\"\"\n+import os\n from setuptools import setup, find_packages\n \n-here = os.path.abspath(os.path.dirname(__file__))\n-with open(os.path.join(here, 'README.txt')) as f:\n- README = f.read()\n-with open(os.path.join(here, 'CHANGES.txt')) as f:\n- CHANGES = f.read()\n \n-requires = [\n+HERE = os.path.abspath(os.path.dirname(__file__))\n+\n+\n+with open(os.path.join(HERE, 'README.txt')) as fp:\n+ README = fp.read()\n+\n+\n+with open(os.path.join(HERE, 'CHANGES.txt')) as fp:\n+ CHANGES = fp.read()\n+\n+\n+REQUIRES = [\n 'pyramid',\n 'pyramid_chameleon',\n 'pyramid_debugtoolbar',\n 'waitress',\n ]\n \n+TESTS_REQUIRE = [\n+ 'webtest'\n+ ]\n+\n setup(name='MyProject',\n version='0.0',\n description='MyProject',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n- \"Programming Language :: Python\",\n- \"Framework :: Pyramid\",\n- \"Topic :: Internet :: WWW/HTTP\",\n- \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n- ],\n+ 'Programming Language :: Python',\n+ 'Framework :: Pyramid',\n+ 'Topic :: Internet :: WWW/HTTP',\n+ 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n+ ],\n author='',\n author_email='',\n url='',\n@@ -32,11 +44,10 @@\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n- install_requires=requires,\n- tests_require=requires,\n- test_suite=\"myproject\",\n+ install_requires=REQUIRES,\n+ tests_require=TESTS_REQUIRE,\n+ test_suite='myproject',\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = myproject:main\n- \"\"\",\n- )\n+ \"\"\")\n", "issue": "Creating Integration Tests needs an example plug-in\nWe have test code here, without the corresponding tested code: http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/testing.html#creating-integration-tests. It would be good to see what it would look like.\n\n", "before_files": [{"content": "import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.txt')) as f:\n README = f.read()\nwith open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\n\nrequires = [\n 'pyramid',\n 'pyramid_chameleon',\n 'pyramid_debugtoolbar',\n 'waitress',\n ]\n\nsetup(name='MyProject',\n version='0.0',\n description='MyProject',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='',\n author_email='',\n url='',\n keywords='web pyramid pylons',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n tests_require=requires,\n test_suite=\"myproject\",\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = myproject:main\n \"\"\",\n )\n", "path": "docs/narr/MyProject/setup.py"}]} | 928 | 506 |
gh_patches_debug_6035 | rasdani/github-patches | git_diff | pyro-ppl__pyro-3325 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in `CVAE` example
There is a bug in the `CVAE` example. The `target` input to the `MaskedBCELoss` is not binary (it contains values of -1, 0, and 1). This was discovered by the PyTorch 2.1 update, which started to validate the inputs of `F.binary_cross_entropy`.
> FAILED tests/test_examples.py::test_cpu[cvae/main.py --num-quadrant-inputs=1 --num-epochs=1] - subprocess.CalledProcessError: Command '['/opt/hostedtoolcache/Python/3.8.18/x64/bin/python', '/home/runner/work/pyro/pyro/examples/cvae/main.py', '--num-quadrant-inputs=1', '--num-epochs=1']' returned non-zero exit status 1.
= 1 failed, 148 passed, 97 skipped, 26558 deselected, 2 warnings in 1948.89s (0:32:28) =
</issue>
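For illustration, one way to keep the masked (non-binary) values away from the BCE call is to select the unmasked pixels before computing the loss. This is a sketch of the idea rather than necessarily the exact fix adopted in the example:

```python
import torch
import torch.nn.functional as F


def masked_bce(prediction, target, masked_with=-1):
    # Compute BCE only over pixels that are not masked, so every target value
    # handed to binary_cross_entropy is a valid 0/1 label.
    target = target.view(prediction.shape)
    keep = target != masked_with
    return F.binary_cross_entropy(prediction[keep], target[keep], reduction="none").sum()
```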
<code>
[start of examples/cvae/baseline.py]
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import copy
5 from pathlib import Path
6
7 import numpy as np
8 import torch
9 import torch.nn as nn
10 import torch.nn.functional as F
11 from tqdm import tqdm
12
13
14 class BaselineNet(nn.Module):
15 def __init__(self, hidden_1, hidden_2):
16 super().__init__()
17 self.fc1 = nn.Linear(784, hidden_1)
18 self.fc2 = nn.Linear(hidden_1, hidden_2)
19 self.fc3 = nn.Linear(hidden_2, 784)
20 self.relu = nn.ReLU()
21
22 def forward(self, x):
23 x = x.view(-1, 784)
24 hidden = self.relu(self.fc1(x))
25 hidden = self.relu(self.fc2(hidden))
26 y = torch.sigmoid(self.fc3(hidden))
27 return y
28
29
30 class MaskedBCELoss(nn.Module):
31 def __init__(self, masked_with=-1):
32 super().__init__()
33 self.masked_with = masked_with
34
35 def forward(self, input, target):
36 target = target.view(input.shape)
37 loss = F.binary_cross_entropy(input, target, reduction="none")
38 loss[target == self.masked_with] = 0
39 return loss.sum()
40
41
42 def train(
43 device,
44 dataloaders,
45 dataset_sizes,
46 learning_rate,
47 num_epochs,
48 early_stop_patience,
49 model_path,
50 ):
51 # Train baseline
52 baseline_net = BaselineNet(500, 500)
53 baseline_net.to(device)
54 optimizer = torch.optim.Adam(baseline_net.parameters(), lr=learning_rate)
55 criterion = MaskedBCELoss()
56 best_loss = np.inf
57 early_stop_count = 0
58
59 for epoch in range(num_epochs):
60 for phase in ["train", "val"]:
61 if phase == "train":
62 baseline_net.train()
63 else:
64 baseline_net.eval()
65
66 running_loss = 0.0
67 num_preds = 0
68
69 bar = tqdm(
70 dataloaders[phase], desc="NN Epoch {} {}".format(epoch, phase).ljust(20)
71 )
72 for i, batch in enumerate(bar):
73 inputs = batch["input"].to(device)
74 outputs = batch["output"].to(device)
75
76 optimizer.zero_grad()
77
78 with torch.set_grad_enabled(phase == "train"):
79 preds = baseline_net(inputs)
80 loss = criterion(preds, outputs) / inputs.size(0)
81 if phase == "train":
82 loss.backward()
83 optimizer.step()
84
85 running_loss += loss.item()
86 num_preds += 1
87 if i % 10 == 0:
88 bar.set_postfix(
89 loss="{:.2f}".format(running_loss / num_preds),
90 early_stop_count=early_stop_count,
91 )
92
93 epoch_loss = running_loss / dataset_sizes[phase]
94 # deep copy the model
95 if phase == "val":
96 if epoch_loss < best_loss:
97 best_loss = epoch_loss
98 best_model_wts = copy.deepcopy(baseline_net.state_dict())
99 early_stop_count = 0
100 else:
101 early_stop_count += 1
102
103 if early_stop_count >= early_stop_patience:
104 break
105
106 baseline_net.load_state_dict(best_model_wts)
107 baseline_net.eval()
108
109 # Save model weights
110 Path(model_path).parent.mkdir(parents=True, exist_ok=True)
111 torch.save(baseline_net.state_dict(), model_path)
112
113 return baseline_net
114
[end of examples/cvae/baseline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/cvae/baseline.py b/examples/cvae/baseline.py
--- a/examples/cvae/baseline.py
+++ b/examples/cvae/baseline.py
@@ -34,8 +34,12 @@
def forward(self, input, target):
target = target.view(input.shape)
- loss = F.binary_cross_entropy(input, target, reduction="none")
- loss[target == self.masked_with] = 0
+ # only calculate loss on target pixels (value = -1)
+ loss = F.binary_cross_entropy(
+ input[target != self.masked_with],
+ target[target != self.masked_with],
+ reduction="none",
+ )
return loss.sum()
| {"golden_diff": "diff --git a/examples/cvae/baseline.py b/examples/cvae/baseline.py\n--- a/examples/cvae/baseline.py\n+++ b/examples/cvae/baseline.py\n@@ -34,8 +34,12 @@\n \n def forward(self, input, target):\n target = target.view(input.shape)\n- loss = F.binary_cross_entropy(input, target, reduction=\"none\")\n- loss[target == self.masked_with] = 0\n+ # only calculate loss on target pixels (value = -1)\n+ loss = F.binary_cross_entropy(\n+ input[target != self.masked_with],\n+ target[target != self.masked_with],\n+ reduction=\"none\",\n+ )\n return loss.sum()\n", "issue": "Bug in `CVAE` example\nThere is a bug in `CVAE` example. The `target` input to the `MaskedBCELoss` is not binary (it has values of -1, 0, 1). This was discovered by the PyTorch 2.1 update which started to validate the inputs of `F.binary_cross_entropy_loss`.\r\n\r\n> FAILED tests/test_examples.py::test_cpu[cvae/main.py --num-quadrant-inputs=1 --num-epochs=1] - subprocess.CalledProcessError: Command '['/opt/hostedtoolcache/Python/3.8.18/x64/bin/python', '/home/runner/work/pyro/pyro/examples/cvae/main.py', '--num-quadrant-inputs=1', '--num-epochs=1']' returned non-zero exit status 1.\r\n= 1 failed, 148 passed, 97 skipped, 26558 deselected, 2 warnings in 1948.89s (0:32:28) =\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\n\nclass BaselineNet(nn.Module):\n def __init__(self, hidden_1, hidden_2):\n super().__init__()\n self.fc1 = nn.Linear(784, hidden_1)\n self.fc2 = nn.Linear(hidden_1, hidden_2)\n self.fc3 = nn.Linear(hidden_2, 784)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = x.view(-1, 784)\n hidden = self.relu(self.fc1(x))\n hidden = self.relu(self.fc2(hidden))\n y = torch.sigmoid(self.fc3(hidden))\n return y\n\n\nclass MaskedBCELoss(nn.Module):\n def __init__(self, masked_with=-1):\n super().__init__()\n self.masked_with = masked_with\n\n def forward(self, input, target):\n target = target.view(input.shape)\n loss = F.binary_cross_entropy(input, target, reduction=\"none\")\n loss[target == self.masked_with] = 0\n return loss.sum()\n\n\ndef train(\n device,\n dataloaders,\n dataset_sizes,\n learning_rate,\n num_epochs,\n early_stop_patience,\n model_path,\n):\n # Train baseline\n baseline_net = BaselineNet(500, 500)\n baseline_net.to(device)\n optimizer = torch.optim.Adam(baseline_net.parameters(), lr=learning_rate)\n criterion = MaskedBCELoss()\n best_loss = np.inf\n early_stop_count = 0\n\n for epoch in range(num_epochs):\n for phase in [\"train\", \"val\"]:\n if phase == \"train\":\n baseline_net.train()\n else:\n baseline_net.eval()\n\n running_loss = 0.0\n num_preds = 0\n\n bar = tqdm(\n dataloaders[phase], desc=\"NN Epoch {} {}\".format(epoch, phase).ljust(20)\n )\n for i, batch in enumerate(bar):\n inputs = batch[\"input\"].to(device)\n outputs = batch[\"output\"].to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase == \"train\"):\n preds = baseline_net(inputs)\n loss = criterion(preds, outputs) / inputs.size(0)\n if phase == \"train\":\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n num_preds += 1\n if i % 10 == 0:\n bar.set_postfix(\n loss=\"{:.2f}\".format(running_loss / num_preds),\n early_stop_count=early_stop_count,\n )\n\n epoch_loss = running_loss / dataset_sizes[phase]\n # deep copy the model\n if phase == \"val\":\n if 
epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(baseline_net.state_dict())\n early_stop_count = 0\n else:\n early_stop_count += 1\n\n if early_stop_count >= early_stop_patience:\n break\n\n baseline_net.load_state_dict(best_model_wts)\n baseline_net.eval()\n\n # Save model weights\n Path(model_path).parent.mkdir(parents=True, exist_ok=True)\n torch.save(baseline_net.state_dict(), model_path)\n\n return baseline_net\n", "path": "examples/cvae/baseline.py"}]} | 1,763 | 156 |
gh_patches_debug_36871 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-2851 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Building Dict is too slow
**Bug description**
```
from parlai.scripts.train_model import TrainModel
TrainModel.main(
# similar to before
task='amazon_qa',
model='projects.wizard_of_wikipedia.generator.agents:EndToEndAgent',
model_file='/tmp/end2end_generator/model',
# initialize with a pretrained model
init_model='zoo:wizard_of_wikipedia/end2end_generator/model',
# arguments we get from the pretrained model.
# Unfortunately, these must be looked up separately for each model.
# eps
dict_lower=True,
dict_tokenizer='bpe',
n_layers=5,
n_heads=2,
dropout=0.20,
ffn_size=512,
embedding_size=256,
log_every_n_secs=10,
validation_patience=12,
validation_metric='ppl',
validation_metric_mode='min',
validation_every_n_epochs=0.5,
n_positions=128,
truncate=128,
max_knowledge=32,
knowledge_alpha=0.95,
knowledge_truncate=32,
learningrate=5e-4,
warmup_updates=5000,
clip=0.1,
lr_scheduler='invsqrt',
embedding_type='fasttext',
beam_size=1,
skip_generation=False,
batchsize=64,
)
```
I am trying to train the amazon_qa task on the Wizard of Wikipedia model, just as an experiment; I am not sure if it will work, but when I run this script it reports creating the task, moves on to building the dictionary, and then becomes an extremely slow process.
<img width="1021" alt="Screenshot 2020-07-18 at 10 42 32 PM" src="https://user-images.githubusercontent.com/45225143/87858114-2f34b380-c949-11ea-9928-3bfc77fa91c8.png">
It has been around 2 hours and it hasn't crossed 0% yet.
Can anyone please point out the error? Thanks.
</issue>
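One plausible contributor to this kind of slowdown is that `num_episodes()` / `num_examples()` rescan the entire dataset every time they are called while the dictionary is being built. A hypothetical sketch of caching those counts once (the class and attribute names are made up for illustration, not ParlAI's actual change):

```python
class CachedCountsTeacher:
    """Illustrative only: compute the expensive counts once and reuse them."""

    def __init__(self, data):
        self.data = data
        self.num_ep = len(data)                    # cached episode count
        self.num_ex = sum(len(ep) for ep in data)  # cached example count

    def num_episodes(self):
        return self.num_ep

    def num_examples(self):
        return self.num_ex
```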
<code>
[start of parlai/tasks/amazon_qa/agents.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from parlai.core.teachers import FixedDialogTeacher
8 from .build import build, RESOURCES
9 import os
10 import json
11
12
13 class DefaultTeacher(FixedDialogTeacher):
14 def __init__(self, opt, shared=None):
15 # store datatype
16 super().__init__(opt, shared)
17
18 dt = opt['datatype'].split(':')[0]
19 if dt != 'train':
20 raise RuntimeError('Not valid datatype (only train).')
21
22 if shared:
23 self.data = shared['data']
24 else:
25 build(opt)
26 self._setup_data()
27 self.reset()
28
29 def num_episodes(self):
30 return len(self.data)
31
32 def num_examples(self):
33 return sum([len(x) for x in self.data])
34
35 def _setup_data(self):
36 self.existing_keys = [
37 'question',
38 'answer',
39 'asin',
40 'questionType',
41 'questionTime',
42 'askerID',
43 'answerType',
44 'answerTime',
45 'unixTime',
46 'answererID',
47 'helpful',
48 'answerScore',
49 ]
50
51 self.data = []
52
53 def create_entry_single(episode):
54 entry = []
55 for key in self.existing_keys:
56 if key in episode:
57 entry.append(str(episode[key]))
58 else:
59 entry.append('N/A')
60 return entry
61
62 def create_entry_multiple(episode):
63 entries = []
64
65 for question in episode['questions']:
66 new_episode = dict()
67 new_episode['asin'] = episode['asin']
68 new_episode['askerID'] = question['askerID']
69 new_episode['questionTime'] = question['questionTime']
70 new_episode['quesitonType'] = question['questionType']
71 new_episode['question'] = question['questionText']
72
73 for answer in question['answers']:
74 answer.update(new_episode)
75 answer['answer'] = answer['answerText']
76 entries.append([create_entry_single(answer)])
77
78 return entries
79
80 fpath = os.path.join(self.opt['datapath'], 'AmazonQA')
81 for i, f in enumerate(RESOURCES):
82 json_file = f.file_name[:-3]
83 file_path = os.path.join(fpath, json_file)
84
85 with open(file_path, 'r') as infile:
86 data = infile.read()
87 new_data = data.replace('}\n{', '},\n{')
88 json_data = json.loads(f'[{new_data}]')
89
90 for ep in json_data:
91 # First 20 datasets have a different format than those later
92 if i < 21:
93 self.data.append([create_entry_single(ep)])
94 else:
95 self.data += create_entry_multiple(ep)
96
97 def get(self, episode_idx, entry_idx=0):
98 ep = self.data[episode_idx]
99 entry = ep[entry_idx]
100 action = dict()
101 action['id'] = episode_idx
102 for i, key in enumerate(self.existing_keys):
103 if i < 2:
104 continue
105 action[key] = entry[i]
106 action['episode_done'] = True
107 action['text'] = entry[0]
108 action['labels'] = [entry[1]]
109
110 return action
111
112 def share(self):
113 shared = super().share()
114 shared['data'] = self.data
115 return shared
116
[end of parlai/tasks/amazon_qa/agents.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/tasks/amazon_qa/agents.py b/parlai/tasks/amazon_qa/agents.py
--- a/parlai/tasks/amazon_qa/agents.py
+++ b/parlai/tasks/amazon_qa/agents.py
@@ -10,6 +10,22 @@
import json
+EXISTING_KEYS = [
+ 'question',
+ 'answer',
+ 'asin',
+ 'questionType',
+ 'questionTime',
+ 'askerID',
+ 'answerType',
+ 'answerTime',
+ 'unixTime',
+ 'answererID',
+ 'helpful',
+ 'answerScore',
+]
+
+
class DefaultTeacher(FixedDialogTeacher):
def __init__(self, opt, shared=None):
# store datatype
@@ -21,38 +37,27 @@
if shared:
self.data = shared['data']
+ self.num_ex = shared['num_ex']
+ self.num_ep = shared['num_ep']
else:
build(opt)
self._setup_data()
+ self.num_ex = sum([len(x) for x in self.data])
+ self.num_ep = len(self.data)
self.reset()
def num_episodes(self):
- return len(self.data)
+ return self.num_ep
def num_examples(self):
- return sum([len(x) for x in self.data])
+ return self.num_ex
def _setup_data(self):
- self.existing_keys = [
- 'question',
- 'answer',
- 'asin',
- 'questionType',
- 'questionTime',
- 'askerID',
- 'answerType',
- 'answerTime',
- 'unixTime',
- 'answererID',
- 'helpful',
- 'answerScore',
- ]
-
self.data = []
def create_entry_single(episode):
entry = []
- for key in self.existing_keys:
+ for key in EXISTING_KEYS:
if key in episode:
entry.append(str(episode[key]))
else:
@@ -99,7 +104,7 @@
entry = ep[entry_idx]
action = dict()
action['id'] = episode_idx
- for i, key in enumerate(self.existing_keys):
+ for i, key in enumerate(EXISTING_KEYS):
if i < 2:
continue
action[key] = entry[i]
@@ -112,4 +117,6 @@
def share(self):
shared = super().share()
shared['data'] = self.data
+ shared['num_ex'] = self.num_ex
+ shared['num_ep'] = self.num_ep
return shared
| {"golden_diff": "diff --git a/parlai/tasks/amazon_qa/agents.py b/parlai/tasks/amazon_qa/agents.py\n--- a/parlai/tasks/amazon_qa/agents.py\n+++ b/parlai/tasks/amazon_qa/agents.py\n@@ -10,6 +10,22 @@\n import json\n \n \n+EXISTING_KEYS = [\n+ 'question',\n+ 'answer',\n+ 'asin',\n+ 'questionType',\n+ 'questionTime',\n+ 'askerID',\n+ 'answerType',\n+ 'answerTime',\n+ 'unixTime',\n+ 'answererID',\n+ 'helpful',\n+ 'answerScore',\n+]\n+\n+\n class DefaultTeacher(FixedDialogTeacher):\n def __init__(self, opt, shared=None):\n # store datatype\n@@ -21,38 +37,27 @@\n \n if shared:\n self.data = shared['data']\n+ self.num_ex = shared['num_ex']\n+ self.num_ep = shared['num_ep']\n else:\n build(opt)\n self._setup_data()\n+ self.num_ex = sum([len(x) for x in self.data])\n+ self.num_ep = len(self.data)\n self.reset()\n \n def num_episodes(self):\n- return len(self.data)\n+ return self.num_ep\n \n def num_examples(self):\n- return sum([len(x) for x in self.data])\n+ return self.num_ex\n \n def _setup_data(self):\n- self.existing_keys = [\n- 'question',\n- 'answer',\n- 'asin',\n- 'questionType',\n- 'questionTime',\n- 'askerID',\n- 'answerType',\n- 'answerTime',\n- 'unixTime',\n- 'answererID',\n- 'helpful',\n- 'answerScore',\n- ]\n-\n self.data = []\n \n def create_entry_single(episode):\n entry = []\n- for key in self.existing_keys:\n+ for key in EXISTING_KEYS:\n if key in episode:\n entry.append(str(episode[key]))\n else:\n@@ -99,7 +104,7 @@\n entry = ep[entry_idx]\n action = dict()\n action['id'] = episode_idx\n- for i, key in enumerate(self.existing_keys):\n+ for i, key in enumerate(EXISTING_KEYS):\n if i < 2:\n continue\n action[key] = entry[i]\n@@ -112,4 +117,6 @@\n def share(self):\n shared = super().share()\n shared['data'] = self.data\n+ shared['num_ex'] = self.num_ex\n+ shared['num_ep'] = self.num_ep\n return shared\n", "issue": "Building Dict is too slow\n**Bug description**\r\n```\r\nfrom parlai.scripts.train_model import TrainModel\r\n\r\nTrainModel.main(\r\n # similar to before\r\n task='amazon_qa',\r\n model='projects.wizard_of_wikipedia.generator.agents:EndToEndAgent',\r\n model_file='/tmp/end2end_generator/model',\r\n\r\n # initialize with a pretrained model\r\n init_model='zoo:wizard_of_wikipedia/end2end_generator/model',\r\n\r\n # arguments we get from the pretrained model.\r\n # Unfortunately, these must be looked up separately for each model.\r\n # eps\r\n dict_lower=True,\r\n dict_tokenizer='bpe',\r\n n_layers=5,\r\n n_heads=2,\r\n dropout=0.20,\r\n ffn_size=512,\r\n embedding_size=256,\r\n log_every_n_secs=10,\r\n validation_patience=12,\r\n validation_metric='ppl',\r\n validation_metric_mode='min',\r\n validation_every_n_epochs=0.5,\r\n n_positions=128,\r\n truncate=128,\r\n max_knowledge=32,\r\n knowledge_alpha=0.95,\r\n knowledge_truncate=32,\r\n learningrate=5e-4,\r\n warmup_updates=5000,\r\n clip=0.1,\r\n lr_scheduler='invsqrt',\r\n embedding_type='fasttext',\r\n beam_size=1,\r\n skip_generation=False,\r\n batchsize=64,\r\n)\r\n\r\n```\r\nI am trying to train amazon_qa task on wizard of Wikipedia model, just to experiment it out, I am not sure if it will work but when I run this script it says creating a task and goes to next stage building dictionary it just becomes to slow of a process.\r\n\r\n<img width=\"1021\" alt=\"Screenshot 2020-07-18 at 10 42 32 PM\" src=\"https://user-images.githubusercontent.com/45225143/87858114-2f34b380-c949-11ea-9928-3bfc77fa91c8.png\">\r\n\r\nLike has been around 2 hrs and it hasn't crossed 0% yet.\r\nCan anyone please point me out the error, 
thanks.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.core.teachers import FixedDialogTeacher\nfrom .build import build, RESOURCES\nimport os\nimport json\n\n\nclass DefaultTeacher(FixedDialogTeacher):\n def __init__(self, opt, shared=None):\n # store datatype\n super().__init__(opt, shared)\n\n dt = opt['datatype'].split(':')[0]\n if dt != 'train':\n raise RuntimeError('Not valid datatype (only train).')\n\n if shared:\n self.data = shared['data']\n else:\n build(opt)\n self._setup_data()\n self.reset()\n\n def num_episodes(self):\n return len(self.data)\n\n def num_examples(self):\n return sum([len(x) for x in self.data])\n\n def _setup_data(self):\n self.existing_keys = [\n 'question',\n 'answer',\n 'asin',\n 'questionType',\n 'questionTime',\n 'askerID',\n 'answerType',\n 'answerTime',\n 'unixTime',\n 'answererID',\n 'helpful',\n 'answerScore',\n ]\n\n self.data = []\n\n def create_entry_single(episode):\n entry = []\n for key in self.existing_keys:\n if key in episode:\n entry.append(str(episode[key]))\n else:\n entry.append('N/A')\n return entry\n\n def create_entry_multiple(episode):\n entries = []\n\n for question in episode['questions']:\n new_episode = dict()\n new_episode['asin'] = episode['asin']\n new_episode['askerID'] = question['askerID']\n new_episode['questionTime'] = question['questionTime']\n new_episode['quesitonType'] = question['questionType']\n new_episode['question'] = question['questionText']\n\n for answer in question['answers']:\n answer.update(new_episode)\n answer['answer'] = answer['answerText']\n entries.append([create_entry_single(answer)])\n\n return entries\n\n fpath = os.path.join(self.opt['datapath'], 'AmazonQA')\n for i, f in enumerate(RESOURCES):\n json_file = f.file_name[:-3]\n file_path = os.path.join(fpath, json_file)\n\n with open(file_path, 'r') as infile:\n data = infile.read()\n new_data = data.replace('}\\n{', '},\\n{')\n json_data = json.loads(f'[{new_data}]')\n\n for ep in json_data:\n # First 20 datasets have a different format than those later\n if i < 21:\n self.data.append([create_entry_single(ep)])\n else:\n self.data += create_entry_multiple(ep)\n\n def get(self, episode_idx, entry_idx=0):\n ep = self.data[episode_idx]\n entry = ep[entry_idx]\n action = dict()\n action['id'] = episode_idx\n for i, key in enumerate(self.existing_keys):\n if i < 2:\n continue\n action[key] = entry[i]\n action['episode_done'] = True\n action['text'] = entry[0]\n action['labels'] = [entry[1]]\n\n return action\n\n def share(self):\n shared = super().share()\n shared['data'] = self.data\n return shared\n", "path": "parlai/tasks/amazon_qa/agents.py"}]} | 2,043 | 612 |
gh_patches_debug_30682 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6032 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
QA: Automate basic server testing
The default QA test plan includes a basic testing section that mostly checks server configuration. These tests are duplicated in the production testinfra tests, so with some work to get `testinfra` to use production settings where available (via `install_files/ansible-base/group_vars/all/site-specific`), it should be possible to reduce tester workload by removing Basic testing in favour of `testinfra`.
</issue>
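A rough sketch of the idea — overlaying values from the site-specific vars file onto the staging test vars when that file exists. The helper name and the key mapping below are assumptions for illustration only:

```python
import os

import yaml


def apply_site_specific_overrides(hostvars, prod_path):
    """Overlay production settings onto the staging test vars, if available."""
    if not os.path.isfile(prod_path):
        return hostvars
    with open(prod_path) as f:
        prodvars = yaml.safe_load(f) or {}
    # Which keys map across is an assumption here; adjust to the real var names.
    for test_key, prod_key in [("app_ip", "app_ip"),
                               ("mon_ip", "monitor_ip"),
                               ("daily_reboot_time", "daily_reboot_time")]:
        if prod_key in prodvars:
            hostvars[test_key] = prodvars[prod_key]
    return hostvars
```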
<code>
[start of molecule/testinfra/conftest.py]
1 """
2 Configuration for TestInfra test suite for SecureDrop.
3 Handles importing host-specific test vars, so test functions
4 can be reused across multiple hosts, with varied targets.
5
6 Vars should be placed in `testinfra/vars/<hostname>.yml`.
7 """
8
9 import io
10 import os
11 import yaml
12 from typing import Any, Dict
13
14 import testutils
15
16
17 # The config tests target staging by default. It's possible to override
18 # for e.g. prod, but the associated vars files are not yet ported.
19 target_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')
20
21
22 def securedrop_import_testinfra_vars(hostname, with_header=False):
23 """
24 Import vars from a YAML file to populate tests with host-specific
25 values used in checks. For instance, the SecureDrop docroot will
26 be under /vagrant in development, but /var/www/securedrop in staging.
27
28 Vars must be stored in `testinfra/vars/<hostname>.yml`.
29 """
30 filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml")
31 with io.open(filepath, 'r') as f:
32 hostvars = yaml.safe_load(f)
33
34 hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.8") # noqa: E501
35 hostvars['python_version'] = "3.8"
36 hostvars['apparmor_enforce_actual'] = hostvars['apparmor_enforce']['focal']
37
38 if with_header:
39 hostvars = dict(securedrop_test_vars=hostvars)
40
41 return hostvars
42
43
44 class TestVars(dict):
45 managed_attrs = {} # type: Dict[str, Any]
46
47 def __init__(self, initial: Dict[str, Any]) -> None:
48 self.securedrop_target_distribution = os.environ.get("SECUREDROP_TARGET_DISTRIBUTION")
49 self.managed_attrs.update(initial)
50
51 def __getattr__(self, name: str) -> Any:
52 """
53 If the requested attribute names a dict in managed_attrs and that
54 contains a key with the name of the target distribution,
55 e.g. "focal", return that. Otherwise return the entire item
56 under the requested name.
57 """
58 try:
59 attr = self.managed_attrs[name]
60 if isinstance(attr, dict) and self.securedrop_target_distribution in attr:
61 return attr[self.securedrop_target_distribution]
62 return attr
63 except KeyError:
64 raise AttributeError(name)
65
66 def __str__(self) -> str:
67 return str(self.managed_attrs)
68
69
70 testutils.securedrop_test_vars = TestVars(securedrop_import_testinfra_vars(target_host))
71
[end of molecule/testinfra/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py
--- a/molecule/testinfra/conftest.py
+++ b/molecule/testinfra/conftest.py
@@ -14,8 +14,7 @@
import testutils
-# The config tests target staging by default. It's possible to override
-# for e.g. prod, but the associated vars files are not yet ported.
+# The config tests target staging by default.
target_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')
@@ -35,6 +34,34 @@
hostvars['python_version'] = "3.8"
hostvars['apparmor_enforce_actual'] = hostvars['apparmor_enforce']['focal']
+ # If the tests are run against a production environment, check local config
+ # and override as necessary.
+ prod_filepath = os.path.join(os.path.dirname(__file__),
+ "../../install_files/ansible-base/group_vars/all/site-specific")
+ if os.path.isfile(prod_filepath):
+ with io.open(prod_filepath, 'r') as f:
+ prodvars = yaml.safe_load(f)
+
+ def _prod_override(vars_key, prod_key):
+ if prod_key in prodvars:
+ hostvars[vars_key] = prodvars[prod_key]
+
+ _prod_override('app_ip', 'app_ip')
+ _prod_override('mon_ip', 'monitor_ip')
+ _prod_override('sasl_domain', 'sasl_domain')
+ _prod_override('sasl_username', 'sasl_username')
+ _prod_override('sasl_password', 'sasl_password')
+ _prod_override('daily_reboot_time', 'daily_reboot_time')
+
+ # Check repo targeting, and update vars
+ repo_filepath = os.path.join(os.path.dirname(__file__),
+ "../../install_files/ansible-base/roles/install-fpf-repo/defaults/main.yml") # noqa: E501
+ if os.path.isfile(repo_filepath):
+ with io.open(repo_filepath, 'r') as f:
+ repovars = yaml.safe_load(f)
+ if 'apt_repo_url' in repovars:
+ hostvars['fpf_apt_repo_url'] = repovars['apt_repo_url']
+
if with_header:
hostvars = dict(securedrop_test_vars=hostvars)
| {"golden_diff": "diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py\n--- a/molecule/testinfra/conftest.py\n+++ b/molecule/testinfra/conftest.py\n@@ -14,8 +14,7 @@\n import testutils\n \n \n-# The config tests target staging by default. It's possible to override\n-# for e.g. prod, but the associated vars files are not yet ported.\n+# The config tests target staging by default.\n target_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')\n \n \n@@ -35,6 +34,34 @@\n hostvars['python_version'] = \"3.8\"\n hostvars['apparmor_enforce_actual'] = hostvars['apparmor_enforce']['focal']\n \n+ # If the tests are run against a production environment, check local config\n+ # and override as necessary.\n+ prod_filepath = os.path.join(os.path.dirname(__file__),\n+ \"../../install_files/ansible-base/group_vars/all/site-specific\")\n+ if os.path.isfile(prod_filepath):\n+ with io.open(prod_filepath, 'r') as f:\n+ prodvars = yaml.safe_load(f)\n+\n+ def _prod_override(vars_key, prod_key):\n+ if prod_key in prodvars:\n+ hostvars[vars_key] = prodvars[prod_key]\n+\n+ _prod_override('app_ip', 'app_ip')\n+ _prod_override('mon_ip', 'monitor_ip')\n+ _prod_override('sasl_domain', 'sasl_domain')\n+ _prod_override('sasl_username', 'sasl_username')\n+ _prod_override('sasl_password', 'sasl_password')\n+ _prod_override('daily_reboot_time', 'daily_reboot_time')\n+\n+ # Check repo targeting, and update vars\n+ repo_filepath = os.path.join(os.path.dirname(__file__),\n+ \"../../install_files/ansible-base/roles/install-fpf-repo/defaults/main.yml\") # noqa: E501\n+ if os.path.isfile(repo_filepath):\n+ with io.open(repo_filepath, 'r') as f:\n+ repovars = yaml.safe_load(f)\n+ if 'apt_repo_url' in repovars:\n+ hostvars['fpf_apt_repo_url'] = repovars['apt_repo_url']\n+\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n", "issue": "QA: Automate basic server testing\nThe default QA test plan includes a basic testing section that mostly checks server configuration. These tests are duplicated in the production testinfra tests, so with some work to get `testinfra` to use production settings where available (via `install_files/ansible-base/group_vars/all/site-specific`), it should be possible to reduce tester workload by removing Basic testing in favour of `testinfra`.\n", "before_files": [{"content": "\"\"\"\nConfiguration for TestInfra test suite for SecureDrop.\nHandles importing host-specific test vars, so test functions\ncan be reused across multiple hosts, with varied targets.\n\nVars should be placed in `testinfra/vars/<hostname>.yml`.\n\"\"\"\n\nimport io\nimport os\nimport yaml\nfrom typing import Any, Dict\n\nimport testutils\n\n\n# The config tests target staging by default. It's possible to override\n# for e.g. prod, but the associated vars files are not yet ported.\ntarget_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')\n\n\ndef securedrop_import_testinfra_vars(hostname, with_header=False):\n \"\"\"\n Import vars from a YAML file to populate tests with host-specific\n values used in checks. 
For instance, the SecureDrop docroot will\n be under /vagrant in development, but /var/www/securedrop in staging.\n\n Vars must be stored in `testinfra/vars/<hostname>.yml`.\n \"\"\"\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n hostvars['apparmor_enforce_actual'] = hostvars['apparmor_enforce']['focal']\n\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n\n return hostvars\n\n\nclass TestVars(dict):\n managed_attrs = {} # type: Dict[str, Any]\n\n def __init__(self, initial: Dict[str, Any]) -> None:\n self.securedrop_target_distribution = os.environ.get(\"SECUREDROP_TARGET_DISTRIBUTION\")\n self.managed_attrs.update(initial)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"\n If the requested attribute names a dict in managed_attrs and that\n contains a key with the name of the target distribution,\n e.g. \"focal\", return that. Otherwise return the entire item\n under the requested name.\n \"\"\"\n try:\n attr = self.managed_attrs[name]\n if isinstance(attr, dict) and self.securedrop_target_distribution in attr:\n return attr[self.securedrop_target_distribution]\n return attr\n except KeyError:\n raise AttributeError(name)\n\n def __str__(self) -> str:\n return str(self.managed_attrs)\n\n\ntestutils.securedrop_test_vars = TestVars(securedrop_import_testinfra_vars(target_host))\n", "path": "molecule/testinfra/conftest.py"}]} | 1,342 | 533 |
gh_patches_debug_2091 | rasdani/github-patches | git_diff | ddionrails__ddionrails-801 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add dynamic range slider for publication year facet
see <https://opensource.appbase.io/reactive-manual/vue/range-components/dynamicrangeslider.html>
</issue>
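Range-style facet components generally need a numeric field mapping rather than a keyword one. Purely as a sketch, assuming the project's existing `django_elasticsearch_dsl` document classes, the year would be indexed as an integer:

```python
from django_elasticsearch_dsl import fields

# Inside the publication search document, the facet field would be numeric so a
# range slider can compute min/max over it, e.g.:
year = fields.IntegerField()
```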
<code>
[start of ddionrails/publications/documents.py]
1 # -*- coding: utf-8 -*-
2
3 """ Search documents for indexing models from ddionrails.publications app into Elasticsearch
4
5
6 Authors:
7 * 2019 Heinz-Alexander Fütterer (DIW Berlin)
8
9 License:
10 | **AGPL-3.0 GNU AFFERO GENERAL PUBLIC LICENSE (AGPL) 3.0**.
11 | See LICENSE at the GitHub
12 `repository <https://github.com/ddionrails/ddionrails/blob/master/LICENSE.md>`_
13 | or at
14 `<https://www.gnu.org/licenses/agpl-3.0.txt>`_.
15 """
16
17 from django.conf import settings
18 from django.db.models import QuerySet
19 from django_elasticsearch_dsl import Document, fields
20 from django_elasticsearch_dsl.registries import registry
21
22 from .models import Publication
23
24
25 @registry.register_document
26 class PublicationDocument(Document):
27 """ Search document for publications.Publication """
28
29 # doc_type was removed in Elasticsearch 7
30 type = fields.KeywordField()
31
32 @staticmethod
33 def prepare_type(publication: Publication) -> str:
34 return "publication"
35
36 # facets
37 sub_type = fields.KeywordField()
38 study = fields.KeywordField()
39 year = fields.KeywordField()
40
41 # prepare_FIELD will be executed while indexing FIELD
42 @staticmethod
43 def prepare_study(publication: Publication) -> str:
44 """ Return the related study """
45 return publication.study.title()
46
47 class Index: # pylint: disable=missing-docstring,too-few-public-methods
48 # Name of the Elasticsearch index
49 name = f"{settings.ELASTICSEARCH_DSL_INDEX_PREFIX}publications"
50
51 class Django: # pylint: disable=missing-docstring,too-few-public-methods
52 model = Publication # The model associated with this Document
53
54 # The fields of the model you want to be indexed in Elasticsearch
55 fields = ("abstract", "author", "cite", "doi", "name", "title", "url")
56
57 def get_queryset(self) -> QuerySet:
58 """
59 Return the queryset that should be indexed by this doc type,
60 with select related study.
61 """
62 return super().get_queryset().select_related("study")
63
[end of ddionrails/publications/documents.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddionrails/publications/documents.py b/ddionrails/publications/documents.py
--- a/ddionrails/publications/documents.py
+++ b/ddionrails/publications/documents.py
@@ -36,7 +36,7 @@
# facets
sub_type = fields.KeywordField()
study = fields.KeywordField()
- year = fields.KeywordField()
+ year = fields.IntegerField()
# prepare_FIELD will be executed while indexing FIELD
@staticmethod
| {"golden_diff": "diff --git a/ddionrails/publications/documents.py b/ddionrails/publications/documents.py\n--- a/ddionrails/publications/documents.py\n+++ b/ddionrails/publications/documents.py\n@@ -36,7 +36,7 @@\n # facets\n sub_type = fields.KeywordField()\n study = fields.KeywordField()\n- year = fields.KeywordField()\n+ year = fields.IntegerField()\n \n # prepare_FIELD will be executed while indexing FIELD\n @staticmethod\n", "issue": "Add dynamic range slider for publication year facet\nsee <https://opensource.appbase.io/reactive-manual/vue/range-components/dynamicrangeslider.html>\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\" Search documents for indexing models from ddionrails.publications app into Elasticsearch\n\n\nAuthors:\n * 2019 Heinz-Alexander F\u00fctterer (DIW Berlin)\n\nLicense:\n | **AGPL-3.0 GNU AFFERO GENERAL PUBLIC LICENSE (AGPL) 3.0**.\n | See LICENSE at the GitHub\n `repository <https://github.com/ddionrails/ddionrails/blob/master/LICENSE.md>`_\n | or at\n `<https://www.gnu.org/licenses/agpl-3.0.txt>`_.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet\nfrom django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom .models import Publication\n\n\[email protected]_document\nclass PublicationDocument(Document):\n \"\"\" Search document for publications.Publication \"\"\"\n\n # doc_type was removed in Elasticsearch 7\n type = fields.KeywordField()\n\n @staticmethod\n def prepare_type(publication: Publication) -> str:\n return \"publication\"\n\n # facets\n sub_type = fields.KeywordField()\n study = fields.KeywordField()\n year = fields.KeywordField()\n\n # prepare_FIELD will be executed while indexing FIELD\n @staticmethod\n def prepare_study(publication: Publication) -> str:\n \"\"\" Return the related study \"\"\"\n return publication.study.title()\n\n class Index: # pylint: disable=missing-docstring,too-few-public-methods\n # Name of the Elasticsearch index\n name = f\"{settings.ELASTICSEARCH_DSL_INDEX_PREFIX}publications\"\n\n class Django: # pylint: disable=missing-docstring,too-few-public-methods\n model = Publication # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = (\"abstract\", \"author\", \"cite\", \"doi\", \"name\", \"title\", \"url\")\n\n def get_queryset(self) -> QuerySet:\n \"\"\"\n Return the queryset that should be indexed by this doc type,\n with select related study.\n \"\"\"\n return super().get_queryset().select_related(\"study\")\n", "path": "ddionrails/publications/documents.py"}]} | 1,163 | 103 |
gh_patches_debug_21420 | rasdani/github-patches | git_diff | getsentry__sentry-python-1931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Patched task factory in AsyncioIntegration loses task result.
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.14.0
### Steps to Reproduce
```python
import asyncio
import sentry_sdk
from sentry_sdk.integrations.asyncio import AsyncioIntegration
async def add(a, b):
return a + b
async def main():
sentry_sdk.init('dsn', integrations=[AsyncioIntegration()])
result = await asyncio.create_task(add(1, 2))
assert result == 3, result
asyncio.run(main())
```
### Expected Result
No `AssertionError`.
### Actual Result
```python
assert result == 3, result
AssertionError: None
```
Patched task factory always loses task result.
</issue>
<code>
[start of sentry_sdk/integrations/asyncio.py]
1 from __future__ import absolute_import
2 import sys
3
4 from sentry_sdk._compat import reraise
5 from sentry_sdk.consts import OP
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.integrations import Integration, DidNotEnable
8 from sentry_sdk._types import MYPY
9 from sentry_sdk.utils import event_from_exception
10
11 try:
12 import asyncio
13 from asyncio.tasks import Task
14 except ImportError:
15 raise DidNotEnable("asyncio not available")
16
17
18 if MYPY:
19 from typing import Any
20
21 from sentry_sdk._types import ExcInfo
22
23
24 def patch_asyncio():
25 # type: () -> None
26 orig_task_factory = None
27 try:
28 loop = asyncio.get_running_loop()
29 orig_task_factory = loop.get_task_factory()
30
31 def _sentry_task_factory(loop, coro):
32 # type: (Any, Any) -> Any
33
34 async def _coro_creating_hub_and_span():
35 # type: () -> None
36 hub = Hub(Hub.current)
37 with hub:
38 with hub.start_span(op=OP.FUNCTION, description=coro.__qualname__):
39 try:
40 await coro
41 except Exception:
42 reraise(*_capture_exception(hub))
43
44 # Trying to use user set task factory (if there is one)
45 if orig_task_factory:
46 return orig_task_factory(loop, _coro_creating_hub_and_span()) # type: ignore
47
48 # The default task factory in `asyncio` does not have its own function
49 # but is just a couple of lines in `asyncio.base_events.create_task()`
50 # Those lines are copied here.
51
52 # WARNING:
53 # If the default behavior of the task creation in asyncio changes,
54 # this will break!
55 task = Task(_coro_creating_hub_and_span(), loop=loop)
56 if task._source_traceback: # type: ignore
57 del task._source_traceback[-1] # type: ignore
58
59 return task
60
61 loop.set_task_factory(_sentry_task_factory)
62 except RuntimeError:
63 # When there is no running loop, we have nothing to patch.
64 pass
65
66
67 def _capture_exception(hub):
68 # type: (Hub) -> ExcInfo
69 exc_info = sys.exc_info()
70
71 integration = hub.get_integration(AsyncioIntegration)
72 if integration is not None:
73 # If an integration is there, a client has to be there.
74 client = hub.client # type: Any
75
76 event, hint = event_from_exception(
77 exc_info,
78 client_options=client.options,
79 mechanism={"type": "asyncio", "handled": False},
80 )
81 hub.capture_event(event, hint=hint)
82
83 return exc_info
84
85
86 class AsyncioIntegration(Integration):
87 identifier = "asyncio"
88
89 @staticmethod
90 def setup_once():
91 # type: () -> None
92 patch_asyncio()
93
[end of sentry_sdk/integrations/asyncio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py
--- a/sentry_sdk/integrations/asyncio.py
+++ b/sentry_sdk/integrations/asyncio.py
@@ -32,15 +32,19 @@
# type: (Any, Any) -> Any
async def _coro_creating_hub_and_span():
- # type: () -> None
+ # type: () -> Any
hub = Hub(Hub.current)
+ result = None
+
with hub:
with hub.start_span(op=OP.FUNCTION, description=coro.__qualname__):
try:
- await coro
+ result = await coro
except Exception:
reraise(*_capture_exception(hub))
+ return result
+
# Trying to use user set task factory (if there is one)
if orig_task_factory:
return orig_task_factory(loop, _coro_creating_hub_and_span()) # type: ignore
| {"golden_diff": "diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py\n--- a/sentry_sdk/integrations/asyncio.py\n+++ b/sentry_sdk/integrations/asyncio.py\n@@ -32,15 +32,19 @@\n # type: (Any, Any) -> Any\n \n async def _coro_creating_hub_and_span():\n- # type: () -> None\n+ # type: () -> Any\n hub = Hub(Hub.current)\n+ result = None\n+\n with hub:\n with hub.start_span(op=OP.FUNCTION, description=coro.__qualname__):\n try:\n- await coro\n+ result = await coro\n except Exception:\n reraise(*_capture_exception(hub))\n \n+ return result\n+\n # Trying to use user set task factory (if there is one)\n if orig_task_factory:\n return orig_task_factory(loop, _coro_creating_hub_and_span()) # type: ignore\n", "issue": "Patched task factory in AsyncioIntegration loses task result.\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.14.0\n\n### Steps to Reproduce\n\n```python\r\nimport asyncio\r\nimport sentry_sdk\r\nfrom sentry_sdk.integrations.asyncio import AsyncioIntegration\r\n\r\nasync def add(a, b):\r\n return a + b\r\n\r\nasync def main():\r\n sentry_sdk.init('dsn', integrations=[AsyncioIntegration()])\r\n result = await asyncio.create_task(add(1, 2))\r\n assert result == 3, result\r\n\r\nasyncio.run(main())\r\n```\n\n### Expected Result\n\nNo `AssertionError`.\n\n### Actual Result\n\n```python\r\n assert result == 3, result\r\nAssertionError: None\r\n```\r\n\r\nPatched task factory always loses task result.\n", "before_files": [{"content": "from __future__ import absolute_import\nimport sys\n\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.consts import OP\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import event_from_exception\n\ntry:\n import asyncio\n from asyncio.tasks import Task\nexcept ImportError:\n raise DidNotEnable(\"asyncio not available\")\n\n\nif MYPY:\n from typing import Any\n\n from sentry_sdk._types import ExcInfo\n\n\ndef patch_asyncio():\n # type: () -> None\n orig_task_factory = None\n try:\n loop = asyncio.get_running_loop()\n orig_task_factory = loop.get_task_factory()\n\n def _sentry_task_factory(loop, coro):\n # type: (Any, Any) -> Any\n\n async def _coro_creating_hub_and_span():\n # type: () -> None\n hub = Hub(Hub.current)\n with hub:\n with hub.start_span(op=OP.FUNCTION, description=coro.__qualname__):\n try:\n await coro\n except Exception:\n reraise(*_capture_exception(hub))\n\n # Trying to use user set task factory (if there is one)\n if orig_task_factory:\n return orig_task_factory(loop, _coro_creating_hub_and_span()) # type: ignore\n\n # The default task factory in `asyncio` does not have its own function\n # but is just a couple of lines in `asyncio.base_events.create_task()`\n # Those lines are copied here.\n\n # WARNING:\n # If the default behavior of the task creation in asyncio changes,\n # this will break!\n task = Task(_coro_creating_hub_and_span(), loop=loop)\n if task._source_traceback: # type: ignore\n del task._source_traceback[-1] # type: ignore\n\n return task\n\n loop.set_task_factory(_sentry_task_factory)\n except RuntimeError:\n # When there is no running loop, we have nothing to patch.\n pass\n\n\ndef _capture_exception(hub):\n # type: (Hub) -> ExcInfo\n exc_info = sys.exc_info()\n\n integration = hub.get_integration(AsyncioIntegration)\n if integration is not None:\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = 
event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"asyncio\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n\n\nclass AsyncioIntegration(Integration):\n identifier = \"asyncio\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n patch_asyncio()\n", "path": "sentry_sdk/integrations/asyncio.py"}]} | 1,535 | 230 |
gh_patches_debug_19032 | rasdani/github-patches | git_diff | streamlit__streamlit-3975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Slider value and description text overlap for slider in sidebar with long description
### Summary
If you use a slider in the sidebar with a long description text, the slider value and the description text overlap. See screenshot:

### Steps to reproduce
Code snippet:
```python
import streamlit as st
topn_ranking = st.sidebar.slider(
"Select the maximum amount of words for classification (higher value adds additional less frequent words to results)",
10,
1000,
(100),
)
```
**Expected behavior:**
To have non-overlapping slider value and description text.
For example, this is how it should look like (with Streamlit version 0.78.0):

**Actual behavior:**
When I start the example of the code snippet, the current value and the description text of the slider overlap.
### Is this a regression?
Yes, it was looking good with Streamlit 0.78.0. My tests showed, that it changed in version 0.83.0.
### Debug info
- Streamlit version: 0.88.0
- Python version: 3.8.10
- Using poetry with pyenv
- OS version: Ubuntu 20.04
- Browser version: Google Chrome 93.0.4577.63
</issue>
<code>
[start of e2e/scripts/st_slider.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 w1 = st.slider("Label 1", 0, 100, 25, 1)
18 st.write("Value 1:", w1)
19
20 w2 = st.slider("Label 2", 0.0, 100.0, (25.0, 75.0), 0.5)
21 st.write("Value 2:", w2)
22
23 if st._is_running_with_streamlit:
24
25 def on_change():
26 st.session_state.slider_changed = True
27
28 st.slider(
29 "Label 3",
30 min_value=0,
31 max_value=100,
32 value=25,
33 step=1,
34 key="slider3",
35 on_change=on_change,
36 )
37 st.write("Value 3:", st.session_state.slider3)
38 st.write("Slider changed:", "slider_changed" in st.session_state)
39
[end of e2e/scripts/st_slider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py
--- a/e2e/scripts/st_slider.py
+++ b/e2e/scripts/st_slider.py
@@ -20,19 +20,28 @@
w2 = st.slider("Label 2", 0.0, 100.0, (25.0, 75.0), 0.5)
st.write("Value 2:", w2)
+w3 = st.slider(
+ "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very long label",
+ 0.0,
+ 100.0,
+ (25.0, 75.0),
+ 0.5,
+)
+st.write("Value 3:", w3)
+
if st._is_running_with_streamlit:
def on_change():
st.session_state.slider_changed = True
st.slider(
- "Label 3",
+ "Label 4",
min_value=0,
max_value=100,
value=25,
step=1,
- key="slider3",
+ key="slider4",
on_change=on_change,
)
- st.write("Value 3:", st.session_state.slider3)
+ st.write("Value 4:", st.session_state.slider4)
st.write("Slider changed:", "slider_changed" in st.session_state)
| {"golden_diff": "diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py\n--- a/e2e/scripts/st_slider.py\n+++ b/e2e/scripts/st_slider.py\n@@ -20,19 +20,28 @@\n w2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\n st.write(\"Value 2:\", w2)\n \n+w3 = st.slider(\n+ \"Label 3 - This is a very very very very very very very very very very very very very very very very very very very very long label\",\n+ 0.0,\n+ 100.0,\n+ (25.0, 75.0),\n+ 0.5,\n+)\n+st.write(\"Value 3:\", w3)\n+\n if st._is_running_with_streamlit:\n \n def on_change():\n st.session_state.slider_changed = True\n \n st.slider(\n- \"Label 3\",\n+ \"Label 4\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n- key=\"slider3\",\n+ key=\"slider4\",\n on_change=on_change,\n )\n- st.write(\"Value 3:\", st.session_state.slider3)\n+ st.write(\"Value 4:\", st.session_state.slider4)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "issue": "Slider value and description text overlap for slider in sidebar with long description\n### Summary\r\n\r\nIf you use a slider in the sidebar with a long description text, the slider value and the description text overlap. See screenshot:\r\n\r\n\r\n\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```python\r\nimport streamlit as st\r\n\r\ntopn_ranking = st.sidebar.slider(\r\n \"Select the maximum amount of words for classification (higher value adds additional less frequent words to results)\",\r\n 10,\r\n 1000,\r\n (100),\r\n)\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nTo have non-overlapping slider value and description text.\r\n\r\nFor example, this is how it should look like (with Streamlit version 0.78.0):\r\n\r\n\r\n**Actual behavior:**\r\n\r\nWhen I start the example of the code snippet, the current value and the description text of the slider overlap.\r\n\r\n### Is this a regression?\r\n\r\nYes, it was looking good with Streamlit 0.78.0. My tests showed, that it changed in version 0.83.0.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.88.0\r\n- Python version: 3.8.10\r\n- Using poetry with pyenv\r\n- OS version: Ubuntu 20.04\r\n- Browser version: Google Chrome 93.0.4577.63\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nw1 = st.slider(\"Label 1\", 0, 100, 25, 1)\nst.write(\"Value 1:\", w1)\n\nw2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\nst.write(\"Value 2:\", w2)\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.slider_changed = True\n\n st.slider(\n \"Label 3\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n key=\"slider3\",\n on_change=on_change,\n )\n st.write(\"Value 3:\", st.session_state.slider3)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_slider.py"}]} | 1,375 | 331 |
gh_patches_debug_12152 | rasdani/github-patches | git_diff | Pylons__pyramid-3657 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx doesn't like the reify decorator
> Failed to get a method signature for kinto.core.resource.Resource.timestamp: <pyramid.decorator.reify object at 0x7f175ae45640> is not a callable object
If I understand correctly this is because the reify decorator doesn't pass along the function signature.
</issue>
<code>
[start of src/pyramid/decorator.py]
1 from functools import update_wrapper
2
3
4 class reify:
5 """Use as a class method decorator. It operates almost exactly like the
6 Python ``@property`` decorator, but it puts the result of the method it
7 decorates into the instance dict after the first call, effectively
8 replacing the function it decorates with an instance variable. It is, in
9 Python parlance, a non-data descriptor. The following is an example and
10 its usage:
11
12 .. doctest::
13
14 >>> from pyramid.decorator import reify
15
16 >>> class Foo:
17 ... @reify
18 ... def jammy(self):
19 ... print('jammy called')
20 ... return 1
21
22 >>> f = Foo()
23 >>> v = f.jammy
24 jammy called
25 >>> print(v)
26 1
27 >>> f.jammy
28 1
29 >>> # jammy func not called the second time; it replaced itself with 1
30 >>> # Note: reassignment is possible
31 >>> f.jammy = 2
32 >>> f.jammy
33 2
34 """
35
36 def __init__(self, wrapped):
37 self.wrapped = wrapped
38 update_wrapper(self, wrapped)
39
40 def __get__(self, inst, objtype=None):
41 if inst is None:
42 return self
43 val = self.wrapped(inst)
44 # reify is a non-data-descriptor which is leveraging the fact
45 # that it is not invoked if the equivalent attribute is defined in the
46 # object's dict, so the setattr here effectively hides this descriptor
47 # from subsequent lookups
48 setattr(inst, self.wrapped.__name__, val)
49 return val
50
[end of src/pyramid/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyramid/decorator.py b/src/pyramid/decorator.py
--- a/src/pyramid/decorator.py
+++ b/src/pyramid/decorator.py
@@ -1,6 +1,3 @@
-from functools import update_wrapper
-
-
class reify:
"""Use as a class method decorator. It operates almost exactly like the
Python ``@property`` decorator, but it puts the result of the method it
@@ -35,7 +32,7 @@
def __init__(self, wrapped):
self.wrapped = wrapped
- update_wrapper(self, wrapped)
+ self.__doc__ = wrapped.__doc__
def __get__(self, inst, objtype=None):
if inst is None:
| {"golden_diff": "diff --git a/src/pyramid/decorator.py b/src/pyramid/decorator.py\n--- a/src/pyramid/decorator.py\n+++ b/src/pyramid/decorator.py\n@@ -1,6 +1,3 @@\n-from functools import update_wrapper\n-\n-\n class reify:\n \"\"\"Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n@@ -35,7 +32,7 @@\n \n def __init__(self, wrapped):\n self.wrapped = wrapped\n- update_wrapper(self, wrapped)\n+ self.__doc__ = wrapped.__doc__\n \n def __get__(self, inst, objtype=None):\n if inst is None:\n", "issue": "Sphinx doesn't like the reify decorator\n> Failed to get a method signature for kinto.core.resource.Resource.timestamp: <pyramid.decorator.reify object at 0x7f175ae45640> is not a callable object\r\n\r\nIf I understand correctly this is because the reify decorator doesn't pass along the function signature.\r\n\n", "before_files": [{"content": "from functools import update_wrapper\n\n\nclass reify:\n \"\"\"Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. The following is an example and\n its usage:\n\n .. doctest::\n\n >>> from pyramid.decorator import reify\n\n >>> class Foo:\n ... @reify\n ... def jammy(self):\n ... print('jammy called')\n ... return 1\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n # reify is a non-data-descriptor which is leveraging the fact\n # that it is not invoked if the equivalent attribute is defined in the\n # object's dict, so the setattr here effectively hides this descriptor\n # from subsequent lookups\n setattr(inst, self.wrapped.__name__, val)\n return val\n", "path": "src/pyramid/decorator.py"}]} | 1,081 | 166 |
gh_patches_debug_12432 | rasdani/github-patches | git_diff | pyca__cryptography-8403 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ignore FRP256v1 in wycheproof tests
</issue>
<code>
[start of src/cryptography/hazmat/backends/openssl/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 import typing
6
7 from cryptography.hazmat.primitives import hashes
8 from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
9
10 if typing.TYPE_CHECKING:
11 from cryptography.hazmat.backends.openssl.backend import Backend
12
13
14 def _evp_pkey_derive(backend: "Backend", evp_pkey, peer_public_key) -> bytes:
15 ctx = backend._lib.EVP_PKEY_CTX_new(evp_pkey, backend._ffi.NULL)
16 backend.openssl_assert(ctx != backend._ffi.NULL)
17 ctx = backend._ffi.gc(ctx, backend._lib.EVP_PKEY_CTX_free)
18 res = backend._lib.EVP_PKEY_derive_init(ctx)
19 backend.openssl_assert(res == 1)
20 res = backend._lib.EVP_PKEY_derive_set_peer(ctx, peer_public_key._evp_pkey)
21 backend.openssl_assert(res == 1)
22 keylen = backend._ffi.new("size_t *")
23 res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen)
24 backend.openssl_assert(res == 1)
25 backend.openssl_assert(keylen[0] > 0)
26 buf = backend._ffi.new("unsigned char[]", keylen[0])
27 res = backend._lib.EVP_PKEY_derive(ctx, buf, keylen)
28 if res != 1:
29 errors_with_text = backend._consume_errors_with_text()
30 raise ValueError("Error computing shared key.", errors_with_text)
31
32 return backend._ffi.buffer(buf, keylen[0])[:]
33
34
35 def _calculate_digest_and_algorithm(
36 data: bytes,
37 algorithm: typing.Union[Prehashed, hashes.HashAlgorithm],
38 ) -> typing.Tuple[bytes, hashes.HashAlgorithm]:
39 if not isinstance(algorithm, Prehashed):
40 hash_ctx = hashes.Hash(algorithm)
41 hash_ctx.update(data)
42 data = hash_ctx.finalize()
43 else:
44 algorithm = algorithm._algorithm
45
46 if len(data) != algorithm.digest_size:
47 raise ValueError(
48 "The provided data must be the same length as the hash "
49 "algorithm's digest size."
50 )
51
52 return (data, algorithm)
53
[end of src/cryptography/hazmat/backends/openssl/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/backends/openssl/utils.py b/src/cryptography/hazmat/backends/openssl/utils.py
--- a/src/cryptography/hazmat/backends/openssl/utils.py
+++ b/src/cryptography/hazmat/backends/openssl/utils.py
@@ -18,7 +18,10 @@
res = backend._lib.EVP_PKEY_derive_init(ctx)
backend.openssl_assert(res == 1)
res = backend._lib.EVP_PKEY_derive_set_peer(ctx, peer_public_key._evp_pkey)
- backend.openssl_assert(res == 1)
+ if res != 1:
+ errors_with_text = backend._consume_errors_with_text()
+ raise ValueError("Error computing shared key.", errors_with_text)
+
keylen = backend._ffi.new("size_t *")
res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen)
backend.openssl_assert(res == 1)
| {"golden_diff": "diff --git a/src/cryptography/hazmat/backends/openssl/utils.py b/src/cryptography/hazmat/backends/openssl/utils.py\n--- a/src/cryptography/hazmat/backends/openssl/utils.py\n+++ b/src/cryptography/hazmat/backends/openssl/utils.py\n@@ -18,7 +18,10 @@\n res = backend._lib.EVP_PKEY_derive_init(ctx)\n backend.openssl_assert(res == 1)\n res = backend._lib.EVP_PKEY_derive_set_peer(ctx, peer_public_key._evp_pkey)\n- backend.openssl_assert(res == 1)\n+ if res != 1:\n+ errors_with_text = backend._consume_errors_with_text()\n+ raise ValueError(\"Error computing shared key.\", errors_with_text)\n+\n keylen = backend._ffi.new(\"size_t *\")\n res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen)\n backend.openssl_assert(res == 1)\n", "issue": "ignore FRP256v1 in wycheproof tests\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport typing\n\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric.utils import Prehashed\n\nif typing.TYPE_CHECKING:\n from cryptography.hazmat.backends.openssl.backend import Backend\n\n\ndef _evp_pkey_derive(backend: \"Backend\", evp_pkey, peer_public_key) -> bytes:\n ctx = backend._lib.EVP_PKEY_CTX_new(evp_pkey, backend._ffi.NULL)\n backend.openssl_assert(ctx != backend._ffi.NULL)\n ctx = backend._ffi.gc(ctx, backend._lib.EVP_PKEY_CTX_free)\n res = backend._lib.EVP_PKEY_derive_init(ctx)\n backend.openssl_assert(res == 1)\n res = backend._lib.EVP_PKEY_derive_set_peer(ctx, peer_public_key._evp_pkey)\n backend.openssl_assert(res == 1)\n keylen = backend._ffi.new(\"size_t *\")\n res = backend._lib.EVP_PKEY_derive(ctx, backend._ffi.NULL, keylen)\n backend.openssl_assert(res == 1)\n backend.openssl_assert(keylen[0] > 0)\n buf = backend._ffi.new(\"unsigned char[]\", keylen[0])\n res = backend._lib.EVP_PKEY_derive(ctx, buf, keylen)\n if res != 1:\n errors_with_text = backend._consume_errors_with_text()\n raise ValueError(\"Error computing shared key.\", errors_with_text)\n\n return backend._ffi.buffer(buf, keylen[0])[:]\n\n\ndef _calculate_digest_and_algorithm(\n data: bytes,\n algorithm: typing.Union[Prehashed, hashes.HashAlgorithm],\n) -> typing.Tuple[bytes, hashes.HashAlgorithm]:\n if not isinstance(algorithm, Prehashed):\n hash_ctx = hashes.Hash(algorithm)\n hash_ctx.update(data)\n data = hash_ctx.finalize()\n else:\n algorithm = algorithm._algorithm\n\n if len(data) != algorithm.digest_size:\n raise ValueError(\n \"The provided data must be the same length as the hash \"\n \"algorithm's digest size.\"\n )\n\n return (data, algorithm)\n", "path": "src/cryptography/hazmat/backends/openssl/utils.py"}]} | 1,166 | 216 |
gh_patches_debug_331 | rasdani/github-patches | git_diff | InternLM__lmdeploy-205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Serving gradio报错

报错: no module named lmdeploy.serve.gradio
环境里已安装了lmdeploy 0.0.2 python包。
</issue>
<code>
[start of lmdeploy/version.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 from typing import Tuple
3
4 __version__ = '0.0.2'
5 short_version = __version__
6
7
8 def parse_version_info(version_str: str) -> Tuple:
9 """Parse version from a string.
10
11 Args:
12 version_str (str): A string represents a version info.
13
14 Returns:
15 tuple: A sequence of integer and string represents version.
16 """
17 _version_info = []
18 for x in version_str.split('.'):
19 if x.isdigit():
20 _version_info.append(int(x))
21 elif x.find('rc') != -1:
22 patch_version = x.split('rc')
23 _version_info.append(int(patch_version[0]))
24 _version_info.append(f'rc{patch_version[1]}')
25 return tuple(_version_info)
26
27
28 version_info = parse_version_info(__version__)
29
30 __all__ = ['__version__', 'version_info', 'parse_version_info']
31
[end of lmdeploy/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lmdeploy/version.py b/lmdeploy/version.py
--- a/lmdeploy/version.py
+++ b/lmdeploy/version.py
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
-__version__ = '0.0.2'
+__version__ = '0.0.3'
short_version = __version__
| {"golden_diff": "diff --git a/lmdeploy/version.py b/lmdeploy/version.py\n--- a/lmdeploy/version.py\n+++ b/lmdeploy/version.py\n@@ -1,7 +1,7 @@\n # Copyright (c) OpenMMLab. All rights reserved.\n from typing import Tuple\n \n-__version__ = '0.0.2'\n+__version__ = '0.0.3'\n short_version = __version__\n", "issue": "Serving gradio\u62a5\u9519\n\r\n\r\n\u62a5\u9519: no module named lmdeploy.serve.gradio\r\n\r\n\u73af\u5883\u91cc\u5df2\u5b89\u88c5\u4e86lmdeploy 0.0.2 python\u5305\u3002\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\n__version__ = '0.0.2'\nshort_version = __version__\n\n\ndef parse_version_info(version_str: str) -> Tuple:\n \"\"\"Parse version from a string.\n\n Args:\n version_str (str): A string represents a version info.\n\n Returns:\n tuple: A sequence of integer and string represents version.\n \"\"\"\n _version_info = []\n for x in version_str.split('.'):\n if x.isdigit():\n _version_info.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n _version_info.append(int(patch_version[0]))\n _version_info.append(f'rc{patch_version[1]}')\n return tuple(_version_info)\n\n\nversion_info = parse_version_info(__version__)\n\n__all__ = ['__version__', 'version_info', 'parse_version_info']\n", "path": "lmdeploy/version.py"}]} | 880 | 92 |
gh_patches_debug_935 | rasdani/github-patches | git_diff | e-valuation__EvaP-817 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
+x on update.sh, earlier apache restart
update_production.sh is missing the x bit, also because of the cache clearing the apache is restarted 2min after the code has changed.
</issue>
<code>
[start of evap/evaluation/management/commands/refresh_results_cache.py]
1 from django.core.management.base import BaseCommand
2 from django.core.serializers.base import ProgressBar
3 from django.core.cache import cache
4
5 from evap.evaluation.models import Course
6 from evap.evaluation.tools import calculate_results
7
8
9 class Command(BaseCommand):
10 args = ''
11 help = 'Clears the cache and pre-warms it with the results of all courses'
12
13 def handle(self, *args, **options):
14 self.stdout.write("Clearing cache...")
15 cache.clear()
16 total_count = Course.objects.count()
17
18 self.stdout.write("Calculating results for all courses...")
19
20 self.stdout.ending = None
21 progress_bar = ProgressBar(self.stdout, total_count)
22
23 for counter, course in enumerate(Course.objects.all()):
24 progress_bar.update(counter + 1)
25 calculate_results(course)
26
27 self.stdout.write("Done with updating cache.\n")
28
[end of evap/evaluation/management/commands/refresh_results_cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/management/commands/refresh_results_cache.py b/evap/evaluation/management/commands/refresh_results_cache.py
--- a/evap/evaluation/management/commands/refresh_results_cache.py
+++ b/evap/evaluation/management/commands/refresh_results_cache.py
@@ -24,4 +24,4 @@
progress_bar.update(counter + 1)
calculate_results(course)
- self.stdout.write("Done with updating cache.\n")
+ self.stdout.write("Results cache has been refreshed.\n")
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/refresh_results_cache.py b/evap/evaluation/management/commands/refresh_results_cache.py\n--- a/evap/evaluation/management/commands/refresh_results_cache.py\n+++ b/evap/evaluation/management/commands/refresh_results_cache.py\n@@ -24,4 +24,4 @@\n progress_bar.update(counter + 1)\n calculate_results(course)\n \n- self.stdout.write(\"Done with updating cache.\\n\")\n+ self.stdout.write(\"Results cache has been refreshed.\\n\")\n", "issue": "+x on update.sh, earlier apache restart\nupdate_production.sh is missing the x bit, also because of the cache clearing the apache is restarted 2min after the code has changed.\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.core.serializers.base import ProgressBar\nfrom django.core.cache import cache\n\nfrom evap.evaluation.models import Course\nfrom evap.evaluation.tools import calculate_results\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Clears the cache and pre-warms it with the results of all courses'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Clearing cache...\")\n cache.clear()\n total_count = Course.objects.count()\n\n self.stdout.write(\"Calculating results for all courses...\")\n\n self.stdout.ending = None\n progress_bar = ProgressBar(self.stdout, total_count)\n\n for counter, course in enumerate(Course.objects.all()):\n progress_bar.update(counter + 1)\n calculate_results(course)\n\n self.stdout.write(\"Done with updating cache.\\n\")\n", "path": "evap/evaluation/management/commands/refresh_results_cache.py"}]} | 818 | 123 |
gh_patches_debug_7052 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/context/process_group_initializer/initializer_sequence.py]
1 #!/usr/bin/env python
2 # -*- encoding: utf-8 -*-
3 import torch.distributed as dist
4
5 from colossalai.registry import DIST_GROUP_INITIALIZER
6 from .initializer_tensor import Initializer_Tensor
7 from .process_group_initializer import ProcessGroupInitializer
8 from ..parallel_mode import ParallelMode
9
10
11 @DIST_GROUP_INITIALIZER.register_module
12 class Initializer_Sequence_DP(ProcessGroupInitializer):
13 """A ProcessGroupInitializer for sequence parallelism all-reduce.
14
15 In Sequence Parallelism, each GPU holds the full copy of model weights,
16 thus, gradient all-reduce occurs across all processes in the same pipeline stage
17
18 Args:
19 rank (int): The rank of current process
20 world_size (int): Size of whole communication world
21 config (Config): Running configuration
22 data_parallel_size (int): Size of data parallel
23 pipeline_parallel_size (int): Size of pipeline parallel
24 tensor_parallel_size (int): Size of tensor parallel
25 """
26
27 def __init__(self, *args, **kwargs):
28 super().__init__(*args, **kwargs)
29 self.dp_size = self.world_size // self.pipeline_parallel_size
30 self.num_group = self.pipeline_parallel_size
31
32 def init_dist_group(self):
33 """Initialize Sequence Parallel process groups used for gradient all-reduce.
34
35 Returns:
36 Tuple: A tuple (local_rank, group_world_size, process_group, ranks_in_group, mode).
37 """
38 local_rank = None
39 ranks_in_group = None
40 process_group = None
41 cpu_group = None
42 group_world_size = None
43 mode = ParallelMode.SEQUENCE_DP
44
45 for i in range(self.num_group):
46 ranks = [i * self.dp_size + j for j in range(self.dp_size)]
47 group = dist.new_group(ranks)
48 group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group
49
50 if self.rank in ranks:
51 local_rank = ranks.index(self.rank)
52 group_world_size = len(ranks)
53 process_group = group
54 cpu_group = group_cpu
55 ranks_in_group = ranks
56
57 return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
58
59
60 @DIST_GROUP_INITIALIZER.register_module
61 class Initializer_Sequence(ProcessGroupInitializer):
62 """A ProcessGroupInitializer for sequence parallelism.
63
64 Args:
65 rank (int): The rank of current process.
66 world_size (int): Size of whole communication world.
67 config (Config): Running configuration.
68 data_parallel_size (int): Size of data parallel.
69 pipeline_parallel_size (int): Size of pipeline parallel.
70 tensor_parallel_size (int): Size of tensor parallel.
71 """
72
73 def __init__(self, *args, **kwargs):
74 super().__init__(*args, **kwargs)
75 # reuse tensor parallel initializer code
76 self._sequence_initializer = Initializer_Tensor(*args, **kwargs)
77 self._sequence_dp_initializer = Initializer_Sequence_DP(*args, **kwargs)
78
79 def init_dist_group(self):
80 """Initialize Sequence parallel process groups and assign local_ranks and groups to each gpu.
81
82 Sequence parallelism requires 2 process groups. The first is for model forward where several processes
83 exchange partial query, key and value embedding to compute self attention values. The second is for
84 all-reduce to synchronize the model parameters.
85
86 Returns:
87 List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:
88 A Sequence parallelism's information in list of tuples.
89 """
90
91 parallel_setting = []
92
93 local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode = \
94 self._sequence_initializer.init_dist_group()
95 # change mode to sequence
96 mode = ParallelMode.SEQUENCE
97
98 parallel_setting.append((local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode))
99 parallel_setting.append(self._sequence_dp_initializer.init_dist_group())
100 return parallel_setting
101
[end of colossalai/context/process_group_initializer/initializer_sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/context/process_group_initializer/initializer_sequence.py b/colossalai/context/process_group_initializer/initializer_sequence.py
--- a/colossalai/context/process_group_initializer/initializer_sequence.py
+++ b/colossalai/context/process_group_initializer/initializer_sequence.py
@@ -3,9 +3,10 @@
import torch.distributed as dist
from colossalai.registry import DIST_GROUP_INITIALIZER
+
+from ..parallel_mode import ParallelMode
from .initializer_tensor import Initializer_Tensor
from .process_group_initializer import ProcessGroupInitializer
-from ..parallel_mode import ParallelMode
@DIST_GROUP_INITIALIZER.register_module
| {"golden_diff": "diff --git a/colossalai/context/process_group_initializer/initializer_sequence.py b/colossalai/context/process_group_initializer/initializer_sequence.py\n--- a/colossalai/context/process_group_initializer/initializer_sequence.py\n+++ b/colossalai/context/process_group_initializer/initializer_sequence.py\n@@ -3,9 +3,10 @@\n import torch.distributed as dist\n \n from colossalai.registry import DIST_GROUP_INITIALIZER\n+\n+from ..parallel_mode import ParallelMode\n from .initializer_tensor import Initializer_Tensor\n from .process_group_initializer import ProcessGroupInitializer\n-from ..parallel_mode import ParallelMode\n \n \n @DIST_GROUP_INITIALIZER.register_module\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport torch.distributed as dist\n\nfrom colossalai.registry import DIST_GROUP_INITIALIZER\nfrom .initializer_tensor import Initializer_Tensor\nfrom .process_group_initializer import ProcessGroupInitializer\nfrom ..parallel_mode import ParallelMode\n\n\n@DIST_GROUP_INITIALIZER.register_module\nclass Initializer_Sequence_DP(ProcessGroupInitializer):\n \"\"\"A ProcessGroupInitializer for sequence parallelism all-reduce.\n\n In Sequence Parallelism, each GPU holds the full copy of model weights,\n thus, gradient all-reduce occurs across all processes in the same pipeline stage\n\n Args:\n rank (int): The rank of current process\n world_size (int): Size of whole communication world\n config (Config): Running configuration\n data_parallel_size (int): Size of data parallel\n pipeline_parallel_size (int): Size of pipeline parallel\n tensor_parallel_size (int): Size of tensor parallel\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dp_size = self.world_size // self.pipeline_parallel_size\n self.num_group = self.pipeline_parallel_size\n\n def init_dist_group(self):\n \"\"\"Initialize Sequence Parallel process groups used for gradient all-reduce.\n\n Returns:\n Tuple: A tuple (local_rank, group_world_size, process_group, ranks_in_group, mode).\n \"\"\"\n local_rank = None\n ranks_in_group = None\n process_group = None\n cpu_group = None\n group_world_size = None\n mode = ParallelMode.SEQUENCE_DP\n\n for i in range(self.num_group):\n ranks = [i * self.dp_size + j for j in range(self.dp_size)]\n group = dist.new_group(ranks)\n group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group\n\n if self.rank in ranks:\n local_rank = ranks.index(self.rank)\n group_world_size = len(ranks)\n process_group = group\n cpu_group = group_cpu\n ranks_in_group = ranks\n\n return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode\n\n\n@DIST_GROUP_INITIALIZER.register_module\nclass Initializer_Sequence(ProcessGroupInitializer):\n \"\"\"A ProcessGroupInitializer for sequence parallelism.\n\n Args:\n rank (int): The rank of current process.\n world_size (int): Size of whole communication world.\n config (Config): Running configuration.\n data_parallel_size (int): Size of data parallel.\n pipeline_parallel_size (int): Size of pipeline parallel.\n tensor_parallel_size (int): Size of tensor parallel.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # reuse tensor parallel initializer code\n self._sequence_initializer = Initializer_Tensor(*args, **kwargs)\n self._sequence_dp_initializer = Initializer_Sequence_DP(*args, 
**kwargs)\n\n def init_dist_group(self):\n \"\"\"Initialize Sequence parallel process groups and assign local_ranks and groups to each gpu.\n\n Sequence parallelism requires 2 process groups. The first is for model forward where several processes\n exchange partial query, key and value embedding to compute self attention values. The second is for\n all-reduce to synchronize the model parameters.\n\n Returns:\n List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:\n A Sequence parallelism's information in list of tuples.\n \"\"\"\n\n parallel_setting = []\n\n local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode = \\\n self._sequence_initializer.init_dist_group()\n # change mode to sequence\n mode = ParallelMode.SEQUENCE\n\n parallel_setting.append((local_rank, group_world_size, process_group, cpu_grop, ranks_in_group, mode))\n parallel_setting.append(self._sequence_dp_initializer.init_dist_group())\n return parallel_setting\n", "path": "colossalai/context/process_group_initializer/initializer_sequence.py"}]} | 1,620 | 134 |
gh_patches_debug_10437 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-2060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ignore `label` in the workflow if it's not tensor
**Is your feature request related to a problem? Please describe.**
During evaluation, now we use `prepare_batch` to extract `image` and `label` fields if existing the key. But maybe we don't want to load `label` for inference and didn't apply transforms, so need to ignore it for this case.
</issue>
<code>
[start of monai/engines/utils.py]
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
13
14 import torch
15
16 from monai.utils import exact_version, optional_import
17 from monai.utils.enums import CommonKeys
18
19 if TYPE_CHECKING:
20 from ignite.engine import EventEnum
21 else:
22 EventEnum, _ = optional_import("ignite.engine", "0.4.4", exact_version, "EventEnum")
23
24 __all__ = [
25 "IterationEvents",
26 "GanKeys",
27 "get_devices_spec",
28 "default_prepare_batch",
29 "default_make_latent",
30 ]
31
32
33 class IterationEvents(EventEnum):
34 """
35 Additional Events engine can register and trigger in the iteration process.
36 Refer to the example in ignite: https://github.com/pytorch/ignite/blob/master/ignite/engine/events.py#L146
37 These Events can be triggered during training iteration:
38 `FORWARD_COMPLETED` is the Event when `network(image, label)` completed.
39 `LOSS_COMPLETED` is the Event when `loss(pred, label)` completed.
40 `BACKWARD_COMPLETED` is the Event when `loss.backward()` completed.
41 `MODEL_COMPLETED` is the Event when all the model related operations completed.
42
43 """
44
45 FORWARD_COMPLETED = "forward_completed"
46 LOSS_COMPLETED = "loss_completed"
47 BACKWARD_COMPLETED = "backward_completed"
48 MODEL_COMPLETED = "model_completed"
49
50
51 class GanKeys:
52 """
53 A set of common keys for generative adversarial networks.
54
55 """
56
57 REALS = "reals"
58 FAKES = "fakes"
59 LATENTS = "latents"
60 GLOSS = "g_loss"
61 DLOSS = "d_loss"
62
63
64 def get_devices_spec(devices: Optional[Sequence[torch.device]] = None) -> List[torch.device]:
65 """
66 Get a valid specification for one or more devices. If `devices` is None get devices for all CUDA devices available.
67 If `devices` is and zero-length structure a single CPU compute device is returned. In any other cases `devices` is
68 returned unchanged.
69
70 Args:
71 devices: list of devices to request, None for all GPU devices, [] for CPU.
72
73 Raises:
74 RuntimeError: When all GPUs are selected (``devices=None``) but no GPUs are available.
75
76 Returns:
77 list of torch.device: list of devices.
78
79 """
80 if devices is None:
81 devices = [torch.device(f"cuda:{d:d}") for d in range(torch.cuda.device_count())]
82
83 if len(devices) == 0:
84 raise RuntimeError("No GPU devices available.")
85
86 elif len(devices) == 0:
87 devices = [torch.device("cpu")]
88
89 else:
90 devices = list(devices)
91
92 return devices
93
94
95 def default_prepare_batch(
96 batchdata: Dict[str, torch.Tensor],
97 device: Optional[Union[str, torch.device]] = None,
98 non_blocking: bool = False,
99 ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
100 """
101 Default function to prepare the data for current iteration.
102 Refer to ignite: https://github.com/pytorch/ignite/blob/v0.4.2/ignite/engine/__init__.py#L28.
103
104 Returns:
105 image, label(optional).
106
107 """
108 if not isinstance(batchdata, dict):
109 raise AssertionError("default prepare_batch expects dictionary input data.")
110 if CommonKeys.LABEL in batchdata:
111 return (
112 batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking),
113 batchdata[CommonKeys.LABEL].to(device=device, non_blocking=non_blocking),
114 )
115 if GanKeys.REALS in batchdata:
116 return batchdata[GanKeys.REALS].to(device=device, non_blocking=non_blocking)
117 return batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking), None
118
119
120 def default_make_latent(
121 num_latents: int,
122 latent_size: int,
123 device: Optional[Union[str, torch.device]] = None,
124 non_blocking: bool = False,
125 ) -> torch.Tensor:
126 return torch.randn(num_latents, latent_size).to(device=device, non_blocking=non_blocking)
127
[end of monai/engines/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/engines/utils.py b/monai/engines/utils.py
--- a/monai/engines/utils.py
+++ b/monai/engines/utils.py
@@ -107,7 +107,7 @@
"""
if not isinstance(batchdata, dict):
raise AssertionError("default prepare_batch expects dictionary input data.")
- if CommonKeys.LABEL in batchdata:
+ if isinstance(batchdata.get(CommonKeys.LABEL, None), torch.Tensor):
return (
batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking),
batchdata[CommonKeys.LABEL].to(device=device, non_blocking=non_blocking),
| {"golden_diff": "diff --git a/monai/engines/utils.py b/monai/engines/utils.py\n--- a/monai/engines/utils.py\n+++ b/monai/engines/utils.py\n@@ -107,7 +107,7 @@\n \"\"\"\n if not isinstance(batchdata, dict):\n raise AssertionError(\"default prepare_batch expects dictionary input data.\")\n- if CommonKeys.LABEL in batchdata:\n+ if isinstance(batchdata.get(CommonKeys.LABEL, None), torch.Tensor):\n return (\n batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking),\n batchdata[CommonKeys.LABEL].to(device=device, non_blocking=non_blocking),\n", "issue": "ignore `label` in the workflow if it's not tensor\n**Is your feature request related to a problem? Please describe.**\r\nDuring evaluation, now we use `prepare_batch` to extract `image` and `label` fields if existing the key. But maybe we don't want to load `label` for inference and didn't apply transforms, so need to ignore it for this case.\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\n\nfrom monai.utils import exact_version, optional_import\nfrom monai.utils.enums import CommonKeys\n\nif TYPE_CHECKING:\n from ignite.engine import EventEnum\nelse:\n EventEnum, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"EventEnum\")\n\n__all__ = [\n \"IterationEvents\",\n \"GanKeys\",\n \"get_devices_spec\",\n \"default_prepare_batch\",\n \"default_make_latent\",\n]\n\n\nclass IterationEvents(EventEnum):\n \"\"\"\n Additional Events engine can register and trigger in the iteration process.\n Refer to the example in ignite: https://github.com/pytorch/ignite/blob/master/ignite/engine/events.py#L146\n These Events can be triggered during training iteration:\n `FORWARD_COMPLETED` is the Event when `network(image, label)` completed.\n `LOSS_COMPLETED` is the Event when `loss(pred, label)` completed.\n `BACKWARD_COMPLETED` is the Event when `loss.backward()` completed.\n `MODEL_COMPLETED` is the Event when all the model related operations completed.\n\n \"\"\"\n\n FORWARD_COMPLETED = \"forward_completed\"\n LOSS_COMPLETED = \"loss_completed\"\n BACKWARD_COMPLETED = \"backward_completed\"\n MODEL_COMPLETED = \"model_completed\"\n\n\nclass GanKeys:\n \"\"\"\n A set of common keys for generative adversarial networks.\n\n \"\"\"\n\n REALS = \"reals\"\n FAKES = \"fakes\"\n LATENTS = \"latents\"\n GLOSS = \"g_loss\"\n DLOSS = \"d_loss\"\n\n\ndef get_devices_spec(devices: Optional[Sequence[torch.device]] = None) -> List[torch.device]:\n \"\"\"\n Get a valid specification for one or more devices. If `devices` is None get devices for all CUDA devices available.\n If `devices` is and zero-length structure a single CPU compute device is returned. 
In any other cases `devices` is\n returned unchanged.\n\n Args:\n devices: list of devices to request, None for all GPU devices, [] for CPU.\n\n Raises:\n RuntimeError: When all GPUs are selected (``devices=None``) but no GPUs are available.\n\n Returns:\n list of torch.device: list of devices.\n\n \"\"\"\n if devices is None:\n devices = [torch.device(f\"cuda:{d:d}\") for d in range(torch.cuda.device_count())]\n\n if len(devices) == 0:\n raise RuntimeError(\"No GPU devices available.\")\n\n elif len(devices) == 0:\n devices = [torch.device(\"cpu\")]\n\n else:\n devices = list(devices)\n\n return devices\n\n\ndef default_prepare_batch(\n batchdata: Dict[str, torch.Tensor],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:\n \"\"\"\n Default function to prepare the data for current iteration.\n Refer to ignite: https://github.com/pytorch/ignite/blob/v0.4.2/ignite/engine/__init__.py#L28.\n\n Returns:\n image, label(optional).\n\n \"\"\"\n if not isinstance(batchdata, dict):\n raise AssertionError(\"default prepare_batch expects dictionary input data.\")\n if CommonKeys.LABEL in batchdata:\n return (\n batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking),\n batchdata[CommonKeys.LABEL].to(device=device, non_blocking=non_blocking),\n )\n if GanKeys.REALS in batchdata:\n return batchdata[GanKeys.REALS].to(device=device, non_blocking=non_blocking)\n return batchdata[CommonKeys.IMAGE].to(device=device, non_blocking=non_blocking), None\n\n\ndef default_make_latent(\n num_latents: int,\n latent_size: int,\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> torch.Tensor:\n return torch.randn(num_latents, latent_size).to(device=device, non_blocking=non_blocking)\n", "path": "monai/engines/utils.py"}]} | 1,925 | 150 |
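A minimal standalone sketch of the same pattern as the row above: move the label to the device only when it is actually a tensor. The key names mirror the record; the sample batch below is made up for illustration and is not taken from MONAI.

```python
import torch

IMAGE, LABEL = "image", "label"

def prepare_batch(batchdata, device=None, non_blocking=False):
    # Move the image unconditionally; move the label only if it is a tensor.
    image = batchdata[IMAGE].to(device=device, non_blocking=non_blocking)
    label = batchdata.get(LABEL)
    if isinstance(label, torch.Tensor):
        return image, label.to(device=device, non_blocking=non_blocking)
    return image, None

sample = {IMAGE: torch.zeros(2, 1, 4, 4), LABEL: "segmentation.nii.gz"}
print(prepare_batch(sample))  # label is a filename here, so it is ignored
```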
gh_patches_debug_33202 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TracePlugin not working for bottle + python 2.7.x
Hi, we have a backend using Python 2.7.x. I'm integrating Datadog APM following the documentation and I have the following error:
```
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/bottle.py", line 764, in _handle
return route.call(**args)
File "/usr/local/lib/python2.7/site-packages/bottle.py", line 1575, in wrapper
rv = callback(*a, **ka)
File "/usr/local/lib/python2.7/site-packages/ddtrace/contrib/bottle/trace.py", line 32, in wrapped
resource = "%s %s" % (request.method, request.route.rule)
File "/usr/local/lib/python2.7/site-packages/bottle.py", line 1237, in __getattr__
raise AttributeError('Attribute %r not defined.' % name)
```
when I go to trace.py line 32, I find this:
`resource = "%s %s" % (request.method, request.route.rule)`
Somehow `request.route` doesn't exist; I think that is probably related to a different Python or Bottle version being used on our end.
Anyway, `route` is already provided as a parameter to this method (`TracePlugin.apply(self, callback, route)`), so there's no need to call `request.route.rule`; you can call `route.rule` instead:
`resource = "%s %s" % (request.method, route.rule)`
Otherwise let me know if you have more details about this error and what's the right way to solve it,
thanks
</issue>
<code>
[start of ddtrace/contrib/bottle/trace.py]
1
2 # 3p
3 from bottle import response, request
4
5 # stdlib
6 import ddtrace
7 from ddtrace.ext import http, AppTypes
8
9 # project
10 from ...propagation.http import HTTPPropagator
11
12 class TracePlugin(object):
13
14 name = 'trace'
15 api = 2
16
17 def __init__(self, service="bottle", tracer=None, distributed_tracing=None):
18 self.service = service
19 self.tracer = tracer or ddtrace.tracer
20 self.tracer.set_service_info(
21 service=service,
22 app="bottle",
23 app_type=AppTypes.web)
24 self.distributed_tracing = distributed_tracing
25
26 def apply(self, callback, route):
27
28 def wrapped(*args, **kwargs):
29 if not self.tracer or not self.tracer.enabled:
30 return callback(*args, **kwargs)
31
32 resource = "%s %s" % (request.method, request.route.rule)
33
34 # Propagate headers such as x-datadog-trace-id.
35 if self.distributed_tracing:
36 propagator = HTTPPropagator()
37 context = propagator.extract(request.headers)
38 if context.trace_id:
39 self.tracer.context_provider.activate(context)
40
41 with self.tracer.trace("bottle.request", service=self.service, resource=resource) as s:
42 code = 0
43 try:
44 return callback(*args, **kwargs)
45 except Exception:
46 # bottle doesn't always translate unhandled exceptions, so
47 # we mark it here.
48 code = 500
49 raise
50 finally:
51 s.set_tag(http.STATUS_CODE, code or response.status_code)
52 s.set_tag(http.URL, request.path)
53 s.set_tag(http.METHOD, request.method)
54
55 return wrapped
56
[end of ddtrace/contrib/bottle/trace.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/bottle/trace.py b/ddtrace/contrib/bottle/trace.py
--- a/ddtrace/contrib/bottle/trace.py
+++ b/ddtrace/contrib/bottle/trace.py
@@ -1,4 +1,3 @@
-
# 3p
from bottle import response, request
@@ -10,18 +9,18 @@
from ...propagation.http import HTTPPropagator
class TracePlugin(object):
-
name = 'trace'
api = 2
- def __init__(self, service="bottle", tracer=None, distributed_tracing=None):
+ def __init__(self, service='bottle', tracer=None, distributed_tracing=None):
self.service = service
self.tracer = tracer or ddtrace.tracer
+ self.distributed_tracing = distributed_tracing
self.tracer.set_service_info(
service=service,
- app="bottle",
- app_type=AppTypes.web)
- self.distributed_tracing = distributed_tracing
+ app='bottle',
+ app_type=AppTypes.web,
+ )
def apply(self, callback, route):
@@ -29,7 +28,7 @@
if not self.tracer or not self.tracer.enabled:
return callback(*args, **kwargs)
- resource = "%s %s" % (request.method, request.route.rule)
+ resource = '{} {}'.format(request.method, route.rule)
# Propagate headers such as x-datadog-trace-id.
if self.distributed_tracing:
@@ -38,7 +37,7 @@
if context.trace_id:
self.tracer.context_provider.activate(context)
- with self.tracer.trace("bottle.request", service=self.service, resource=resource) as s:
+ with self.tracer.trace('bottle.request', service=self.service, resource=resource) as s:
code = 0
try:
return callback(*args, **kwargs)
| {"golden_diff": "diff --git a/ddtrace/contrib/bottle/trace.py b/ddtrace/contrib/bottle/trace.py\n--- a/ddtrace/contrib/bottle/trace.py\n+++ b/ddtrace/contrib/bottle/trace.py\n@@ -1,4 +1,3 @@\n-\n # 3p\n from bottle import response, request\n \n@@ -10,18 +9,18 @@\n from ...propagation.http import HTTPPropagator\n \n class TracePlugin(object):\n-\n name = 'trace'\n api = 2\n \n- def __init__(self, service=\"bottle\", tracer=None, distributed_tracing=None):\n+ def __init__(self, service='bottle', tracer=None, distributed_tracing=None):\n self.service = service\n self.tracer = tracer or ddtrace.tracer\n+ self.distributed_tracing = distributed_tracing\n self.tracer.set_service_info(\n service=service,\n- app=\"bottle\",\n- app_type=AppTypes.web)\n- self.distributed_tracing = distributed_tracing\n+ app='bottle',\n+ app_type=AppTypes.web,\n+ )\n \n def apply(self, callback, route):\n \n@@ -29,7 +28,7 @@\n if not self.tracer or not self.tracer.enabled:\n return callback(*args, **kwargs)\n \n- resource = \"%s %s\" % (request.method, request.route.rule)\n+ resource = '{} {}'.format(request.method, route.rule)\n \n # Propagate headers such as x-datadog-trace-id.\n if self.distributed_tracing:\n@@ -38,7 +37,7 @@\n if context.trace_id:\n self.tracer.context_provider.activate(context)\n \n- with self.tracer.trace(\"bottle.request\", service=self.service, resource=resource) as s:\n+ with self.tracer.trace('bottle.request', service=self.service, resource=resource) as s:\n code = 0\n try:\n return callback(*args, **kwargs)\n", "issue": "TracePlugin not working for bottle + python 2.7.x\nHi, we have a backend using python 2.7.x, Im integrating Datadog APM following the documentation and I have the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/site-packages/bottle.py\", line 764, in _handle\r\n return route.call(**args)\r\n File \"/usr/local/lib/python2.7/site-packages/bottle.py\", line 1575, in wrapper\r\n rv = callback(*a, **ka)\r\n File \"/usr/local/lib/python2.7/site-packages/ddtrace/contrib/bottle/trace.py\", line 32, in wrapped\r\n resource = \"%s %s\" % (request.method, request.route.rule)\r\n File \"/usr/local/lib/python2.7/site-packages/bottle.py\", line 1237, in __getattr__\r\n raise AttributeError('Attribute %r not defined.' 
% name)\r\n```\r\n\r\nwhen I go to trace.py line 32, I find this:\r\n`resource = \"%s %s\" % (request.method, request.route.rule)`\r\n\r\nsomehow request.route doesn't exist, I think that probably is related to a different python or bottle version being used on our end.\r\nAnyway `route` is already provided as a parameter for this method (`TracePlugin.apply(self, callback, route):`) so there's no need to call `request.route.rule`, you can call `route.rule` instead,\r\n`resource = \"%s %s\" % (request.method, route.rule)`\r\n\r\nOtherwise let me know if you have more details about this error and what's the right way to solve it,\r\nthanks\r\n\n", "before_files": [{"content": "\n# 3p\nfrom bottle import response, request\n\n# stdlib\nimport ddtrace\nfrom ddtrace.ext import http, AppTypes\n\n# project\nfrom ...propagation.http import HTTPPropagator\n\nclass TracePlugin(object):\n\n name = 'trace'\n api = 2\n\n def __init__(self, service=\"bottle\", tracer=None, distributed_tracing=None):\n self.service = service\n self.tracer = tracer or ddtrace.tracer\n self.tracer.set_service_info(\n service=service,\n app=\"bottle\",\n app_type=AppTypes.web)\n self.distributed_tracing = distributed_tracing\n\n def apply(self, callback, route):\n\n def wrapped(*args, **kwargs):\n if not self.tracer or not self.tracer.enabled:\n return callback(*args, **kwargs)\n\n resource = \"%s %s\" % (request.method, request.route.rule)\n\n # Propagate headers such as x-datadog-trace-id.\n if self.distributed_tracing:\n propagator = HTTPPropagator()\n context = propagator.extract(request.headers)\n if context.trace_id:\n self.tracer.context_provider.activate(context)\n\n with self.tracer.trace(\"bottle.request\", service=self.service, resource=resource) as s:\n code = 0\n try:\n return callback(*args, **kwargs)\n except Exception:\n # bottle doesn't always translate unhandled exceptions, so\n # we mark it here.\n code = 500\n raise\n finally:\n s.set_tag(http.STATUS_CODE, code or response.status_code)\n s.set_tag(http.URL, request.path)\n s.set_tag(http.METHOD, request.method)\n\n return wrapped\n", "path": "ddtrace/contrib/bottle/trace.py"}]} | 1,386 | 439 |
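A self-contained sketch of the fix pattern described in the row above: build the resource name from the `route` object that Bottle passes to `apply()`, rather than from `request.route`, which older Bottle releases do not expose. The plugin class name and the `print` call are illustrative, not code from the repository.

```python
from bottle import Bottle, request

class ResourceNamePlugin(object):
    name = "resource-name"
    api = 2

    def apply(self, callback, route):
        def wrapped(*args, **kwargs):
            # route.rule is the registered URL pattern, e.g. "/hello/<name>"
            resource = "{} {}".format(request.method, route.rule)
            print("handling", resource)
            return callback(*args, **kwargs)
        return wrapped

app = Bottle()
app.install(ResourceNamePlugin())

@app.route("/hello/<name>")
def hello(name):
    return "Hello {}".format(name)
```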
gh_patches_debug_3827 | rasdani/github-patches | git_diff | kivy__python-for-android-1427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pin pyjnius version
This pull request adds a version pin for pyjnius as requested here: #1415
Please note I am proposing this as a **permanent measure** because this is such a core component - not this specific version of course, but that it is always pinned.
Even if you just randomly bump the version up in any random commit without checking, this is a huge improvement: it will prevent p4a master builds from randomly failing out of the blue *when not even changing the commit*, and people will be able to go back to an earlier p4a master commit to avoid sudden pyjnius breakages instead of patching around in the recipes folder (which depending on the build pipeline might be quite a time waster to do, especially compared to just going back to a known working p4a master commit).
Summed up, please pin this, carelessly bump it whenever, and have less unhappy users. :smile:
(And I'm not proposing pinning *everything*, I know you have way too many recipes and people would forget to bump it, I understand - but at least the core components like pyjnius, would that possibly sound feasible?)
</issue>
<code>
[start of pythonforandroid/recipes/pyjnius/__init__.py]
1 from pythonforandroid.recipe import CythonRecipe
2 from pythonforandroid.toolchain import shprint, current_directory, info
3 from pythonforandroid.patching import will_build
4 import sh
5 from os.path import join
6
7
8 class PyjniusRecipe(CythonRecipe):
9 version = 'master'
10 url = 'https://github.com/kivy/pyjnius/archive/{version}.zip'
11 name = 'pyjnius'
12 depends = [('python2', 'python3crystax'), ('genericndkbuild', 'sdl2', 'sdl'), 'six']
13 site_packages_name = 'jnius'
14
15 patches = [('sdl2_jnienv_getter.patch', will_build('sdl2')),
16 ('genericndkbuild_jnienv_getter.patch', will_build('genericndkbuild'))]
17
18 def postbuild_arch(self, arch):
19 super(PyjniusRecipe, self).postbuild_arch(arch)
20 info('Copying pyjnius java class to classes build dir')
21 with current_directory(self.get_build_dir(arch.arch)):
22 shprint(sh.cp, '-a', join('jnius', 'src', 'org'), self.ctx.javaclass_dir)
23
24
25 recipe = PyjniusRecipe()
26
[end of pythonforandroid/recipes/pyjnius/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/pyjnius/__init__.py b/pythonforandroid/recipes/pyjnius/__init__.py
--- a/pythonforandroid/recipes/pyjnius/__init__.py
+++ b/pythonforandroid/recipes/pyjnius/__init__.py
@@ -6,7 +6,7 @@
class PyjniusRecipe(CythonRecipe):
- version = 'master'
+ version = '1.1.3'
url = 'https://github.com/kivy/pyjnius/archive/{version}.zip'
name = 'pyjnius'
depends = [('python2', 'python3crystax'), ('genericndkbuild', 'sdl2', 'sdl'), 'six']
| {"golden_diff": "diff --git a/pythonforandroid/recipes/pyjnius/__init__.py b/pythonforandroid/recipes/pyjnius/__init__.py\n--- a/pythonforandroid/recipes/pyjnius/__init__.py\n+++ b/pythonforandroid/recipes/pyjnius/__init__.py\n@@ -6,7 +6,7 @@\n \n \n class PyjniusRecipe(CythonRecipe):\n- version = 'master'\n+ version = '1.1.3'\n url = 'https://github.com/kivy/pyjnius/archive/{version}.zip'\n name = 'pyjnius'\n depends = [('python2', 'python3crystax'), ('genericndkbuild', 'sdl2', 'sdl'), 'six']\n", "issue": "Pin pyjnius version\nThis pull request adds a version pin for pyjnius as requested here: #1415 \r\n\r\nPlease note I am proposing this as a **permanent measure** because this is such a core component - not this specific version of course, but that it is always pinned.\r\n\r\nEven if you just randomly bump the version up in any random commit without checking, this is a huge improvement: it will prevent p4a master builds from randomly failing out of the blue *when not even changing the commit*, and people will be able to go back to an earlier p4a master commit to avoid sudden pyjnius breakages instead of patching around in the recipes folder (which depending on the build pipeline might be quite a time waster to do, especially compared to just going back to a known working p4a master commit).\r\n\r\nSummed up, please pin this, carelessly bump it whenever, and have less unhappy users. :smile:\r\n\r\n(And I'm not proposing pinning *everything*, I know you have way too many recipes and people would forget to bump it, I understand - but at least the core components like pyjnius, would that possibly sound feasible?)\n", "before_files": [{"content": "from pythonforandroid.recipe import CythonRecipe\nfrom pythonforandroid.toolchain import shprint, current_directory, info\nfrom pythonforandroid.patching import will_build\nimport sh\nfrom os.path import join\n\n\nclass PyjniusRecipe(CythonRecipe):\n version = 'master'\n url = 'https://github.com/kivy/pyjnius/archive/{version}.zip'\n name = 'pyjnius'\n depends = [('python2', 'python3crystax'), ('genericndkbuild', 'sdl2', 'sdl'), 'six']\n site_packages_name = 'jnius'\n\n patches = [('sdl2_jnienv_getter.patch', will_build('sdl2')),\n ('genericndkbuild_jnienv_getter.patch', will_build('genericndkbuild'))]\n\n def postbuild_arch(self, arch):\n super(PyjniusRecipe, self).postbuild_arch(arch)\n info('Copying pyjnius java class to classes build dir')\n with current_directory(self.get_build_dir(arch.arch)):\n shprint(sh.cp, '-a', join('jnius', 'src', 'org'), self.ctx.javaclass_dir)\n\n\nrecipe = PyjniusRecipe()\n", "path": "pythonforandroid/recipes/pyjnius/__init__.py"}]} | 1,096 | 158 |
gh_patches_debug_39067 | rasdani/github-patches | git_diff | ultrabug__py3status-958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
selinux module install check refers to binary not in user path
The selinux module tries to check whether selinux is installed.
`if not self.py3.check_commands(['getenforce']):`
This effectively runs `which getenforce`. The `getenforce` binary is installed under /usr/sbin (on Gentoo, and also Fedora as far as I read), which is not in the regular user PATH and is therefore not found, resulting in the message `selinux: isn't installed`.
Removing the check makes the module work as expected.
</issue>
<code>
[start of py3status/modules/selinux.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display SELinux state.
4
5 This module displays the state of SELinux on your machine:
6 Enforcing (good), Permissive (bad), or Disabled (bad).
7
8 Configuration parameters:
9 cache_timeout: refresh interval for this module (default 10)
10 format: display format for this module (default 'selinux: {state}')
11 state_disabled: show when no SELinux policy is loaded.
12 (default 'disabled')
13 state_enforcing: show when SELinux security policy is enforced.
14 (default 'enforcing')
15 state_permissive: show when SELinux prints warnings instead of enforcing.
16 (default 'permissive')
17
18 Format placeholders:
19 {state} SELinux state
20
21 Color options:
22 color_bad: Enforcing
23 color_degraded: Permissive
24 color_good: Disabled
25
26 Requires:
27 libselinux-python: SELinux python bindings for libselinux
28
29 @author bstinsonmhk
30 @license BSD
31
32 SAMPLE OUTPUT
33 {'full_text': 'selinux: enforcing', 'color': '#00FF00'}
34
35 permissive
36 {'full_text': 'selinux: permissive', 'color': '#FFFF00'}
37
38 disabled
39 {'full_text': 'selinux: disabled', 'color': '#FF0000'}
40 """
41 from __future__ import absolute_import
42 import selinux
43 STRING_UNAVAILABLE = "selinux: isn't installed"
44
45
46 class Py3status:
47 """
48 """
49 # available configuration parameters
50 cache_timeout = 10
51 format = 'selinux: {state}'
52 state_disabled = 'disabled'
53 state_enforcing = 'enforcing'
54 state_permissive = 'permissive'
55
56 def selinux(self):
57 if not self.py3.check_commands(['getenforce']):
58 return {'cache_until': self.py3.CACHE_FOREVER,
59 'color': self.py3.COLOR_BAD,
60 'full_text': STRING_UNAVAILABLE}
61 try:
62 if selinux.security_getenforce():
63 state = self.state_enforcing
64 color = self.py3.COLOR_GOOD
65 else:
66 state = self.state_permissive
67 color = self.py3.COLOR_BAD
68 except:
69 state = self.state_disabled
70 color = self.py3.COLOR_BAD
71
72 return {'cached_until': self.py3.time_in(self.cache_timeout),
73 'full_text': self.py3.safe_format(self.format, {'state': state}),
74 'color': color}
75
76
77 if __name__ == '__main__':
78 """
79 Run module in test mode.
80 """
81 from py3status.module_test import module_test
82 module_test(Py3status)
83
[end of py3status/modules/selinux.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/modules/selinux.py b/py3status/modules/selinux.py
--- a/py3status/modules/selinux.py
+++ b/py3status/modules/selinux.py
@@ -3,11 +3,11 @@
Display SELinux state.
This module displays the state of SELinux on your machine:
-Enforcing (good), Permissive (bad), or Disabled (bad).
+Enforcing (good), Permissive (degraded), or Disabled (bad).
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
- format: display format for this module (default 'selinux: {state}')
+ format: display format for this module (default 'SELinux: {state}')
state_disabled: show when no SELinux policy is loaded.
(default 'disabled')
state_enforcing: show when SELinux security policy is enforced.
@@ -30,17 +30,17 @@
@license BSD
SAMPLE OUTPUT
-{'full_text': 'selinux: enforcing', 'color': '#00FF00'}
+{'full_text': 'SELinux: enforcing', 'color': '#00FF00'}
permissive
-{'full_text': 'selinux: permissive', 'color': '#FFFF00'}
+{'full_text': 'SELinux: permissive', 'color': '#FFFF00'}
disabled
-{'full_text': 'selinux: disabled', 'color': '#FF0000'}
+{'full_text': 'SELinux: disabled', 'color': '#FF0000'}
"""
+
from __future__ import absolute_import
import selinux
-STRING_UNAVAILABLE = "selinux: isn't installed"
class Py3status:
@@ -48,30 +48,28 @@
"""
# available configuration parameters
cache_timeout = 10
- format = 'selinux: {state}'
+ format = 'SELinux: {state}'
state_disabled = 'disabled'
state_enforcing = 'enforcing'
state_permissive = 'permissive'
def selinux(self):
- if not self.py3.check_commands(['getenforce']):
- return {'cache_until': self.py3.CACHE_FOREVER,
- 'color': self.py3.COLOR_BAD,
- 'full_text': STRING_UNAVAILABLE}
try:
if selinux.security_getenforce():
state = self.state_enforcing
color = self.py3.COLOR_GOOD
else:
state = self.state_permissive
- color = self.py3.COLOR_BAD
+ color = self.py3.COLOR_DEGRADED
except:
state = self.state_disabled
color = self.py3.COLOR_BAD
- return {'cached_until': self.py3.time_in(self.cache_timeout),
- 'full_text': self.py3.safe_format(self.format, {'state': state}),
- 'color': color}
+ return {
+ 'cached_until': self.py3.time_in(self.cache_timeout),
+ 'full_text': self.py3.safe_format(self.format, {'state': state}),
+ 'color': color
+ }
if __name__ == '__main__':
| {"golden_diff": "diff --git a/py3status/modules/selinux.py b/py3status/modules/selinux.py\n--- a/py3status/modules/selinux.py\n+++ b/py3status/modules/selinux.py\n@@ -3,11 +3,11 @@\n Display SELinux state.\n \n This module displays the state of SELinux on your machine:\n-Enforcing (good), Permissive (bad), or Disabled (bad).\n+Enforcing (good), Permissive (degraded), or Disabled (bad).\n \n Configuration parameters:\n cache_timeout: refresh interval for this module (default 10)\n- format: display format for this module (default 'selinux: {state}')\n+ format: display format for this module (default 'SELinux: {state}')\n state_disabled: show when no SELinux policy is loaded.\n (default 'disabled')\n state_enforcing: show when SELinux security policy is enforced.\n@@ -30,17 +30,17 @@\n @license BSD\n \n SAMPLE OUTPUT\n-{'full_text': 'selinux: enforcing', 'color': '#00FF00'}\n+{'full_text': 'SELinux: enforcing', 'color': '#00FF00'}\n \n permissive\n-{'full_text': 'selinux: permissive', 'color': '#FFFF00'}\n+{'full_text': 'SELinux: permissive', 'color': '#FFFF00'}\n \n disabled\n-{'full_text': 'selinux: disabled', 'color': '#FF0000'}\n+{'full_text': 'SELinux: disabled', 'color': '#FF0000'}\n \"\"\"\n+\n from __future__ import absolute_import\n import selinux\n-STRING_UNAVAILABLE = \"selinux: isn't installed\"\n \n \n class Py3status:\n@@ -48,30 +48,28 @@\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n- format = 'selinux: {state}'\n+ format = 'SELinux: {state}'\n state_disabled = 'disabled'\n state_enforcing = 'enforcing'\n state_permissive = 'permissive'\n \n def selinux(self):\n- if not self.py3.check_commands(['getenforce']):\n- return {'cache_until': self.py3.CACHE_FOREVER,\n- 'color': self.py3.COLOR_BAD,\n- 'full_text': STRING_UNAVAILABLE}\n try:\n if selinux.security_getenforce():\n state = self.state_enforcing\n color = self.py3.COLOR_GOOD\n else:\n state = self.state_permissive\n- color = self.py3.COLOR_BAD\n+ color = self.py3.COLOR_DEGRADED\n except:\n state = self.state_disabled\n color = self.py3.COLOR_BAD\n \n- return {'cached_until': self.py3.time_in(self.cache_timeout),\n- 'full_text': self.py3.safe_format(self.format, {'state': state}),\n- 'color': color}\n+ return {\n+ 'cached_until': self.py3.time_in(self.cache_timeout),\n+ 'full_text': self.py3.safe_format(self.format, {'state': state}),\n+ 'color': color\n+ }\n \n \n if __name__ == '__main__':\n", "issue": "selinux module install check refers to binary not in user path\nThe selinux module tries to check whether selinux is installed.\r\n\r\n`if not self.py3.check_commands(['getenforce']):`\r\n\r\nthis effectively runs `which getenforce`. 
The getenforce binary is installed under /usr/sbin (gentoo, also fedora as far as i read) which is not in the regular user path and therefore not found, resulting in the message `selinux: isn't installed`.\r\nRemoving the check makes the module work as expected.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay SELinux state.\n\nThis module displays the state of SELinux on your machine:\nEnforcing (good), Permissive (bad), or Disabled (bad).\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module (default 10)\n format: display format for this module (default 'selinux: {state}')\n state_disabled: show when no SELinux policy is loaded.\n (default 'disabled')\n state_enforcing: show when SELinux security policy is enforced.\n (default 'enforcing')\n state_permissive: show when SELinux prints warnings instead of enforcing.\n (default 'permissive')\n\nFormat placeholders:\n {state} SELinux state\n\nColor options:\n color_bad: Enforcing\n color_degraded: Permissive\n color_good: Disabled\n\nRequires:\n libselinux-python: SELinux python bindings for libselinux\n\n@author bstinsonmhk\n@license BSD\n\nSAMPLE OUTPUT\n{'full_text': 'selinux: enforcing', 'color': '#00FF00'}\n\npermissive\n{'full_text': 'selinux: permissive', 'color': '#FFFF00'}\n\ndisabled\n{'full_text': 'selinux: disabled', 'color': '#FF0000'}\n\"\"\"\nfrom __future__ import absolute_import\nimport selinux\nSTRING_UNAVAILABLE = \"selinux: isn't installed\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n format = 'selinux: {state}'\n state_disabled = 'disabled'\n state_enforcing = 'enforcing'\n state_permissive = 'permissive'\n\n def selinux(self):\n if not self.py3.check_commands(['getenforce']):\n return {'cache_until': self.py3.CACHE_FOREVER,\n 'color': self.py3.COLOR_BAD,\n 'full_text': STRING_UNAVAILABLE}\n try:\n if selinux.security_getenforce():\n state = self.state_enforcing\n color = self.py3.COLOR_GOOD\n else:\n state = self.state_permissive\n color = self.py3.COLOR_BAD\n except:\n state = self.state_disabled\n color = self.py3.COLOR_BAD\n\n return {'cached_until': self.py3.time_in(self.cache_timeout),\n 'full_text': self.py3.safe_format(self.format, {'state': state}),\n 'color': color}\n\n\nif __name__ == '__main__':\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/selinux.py"}]} | 1,371 | 695 |
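As a side note on the PATH problem described in the row above, here is a small sketch (assuming a typical Linux filesystem layout) of how a binary living in a sbin directory can be located even though plain `shutil.which()` misses it for unprivileged users; the helper name is made up for illustration.

```python
import os
import shutil

def find_sbin_command(name):
    sbin_dirs = ("/sbin", "/usr/sbin", "/usr/local/sbin")
    search_path = os.pathsep.join([os.environ.get("PATH", "")] + list(sbin_dirs))
    return shutil.which(name, path=search_path)

print(shutil.which("getenforce"))       # often None for regular users
print(find_sbin_command("getenforce"))  # e.g. /usr/sbin/getenforce where SELinux is installed
```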
gh_patches_debug_1998 | rasdani/github-patches | git_diff | PrefectHQ__prefect-2609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider promoting `case` to the top level
## Current behavior
*Please describe how the feature works today*
Currently, the `case` context manager must be imported from `prefect.tasks.control_flow.case`.
## Proposed behavior
*Please describe your proposed change to the current behavior*
I think we should consider promoting `case` to being importable as `prefect.case`, since it forms a fundamental part of the Python API. Other control flow utilities have "task-like" semantics (even if they are called as functions), and it's more appropriate for them to live in a `tasks` submodule. However, like `task`, `Flow`, `tags`, and `unmapped`, I believe `case` represents a significant component of Prefect's Python syntax and warrants top-level availability.
## Example
*Please give an example of how the enhancement would be useful*
```
from prefect import Flow, case
with Flow("example"):
with case(is_this_easy, True):
do_stuff()
with prefect.tasks.control_flow.case(is_this_easy, False):
do_other_stuff()
```
</issue>
<code>
[start of src/prefect/__init__.py]
1 import prefect.utilities
2 from prefect.configuration import config
3
4 from prefect.utilities.context import context
5
6 from prefect.client import Client
7 import prefect.schedules
8 import prefect.triggers
9 import prefect.environments
10
11 from prefect.core import Task, Flow, Parameter
12 import prefect.engine
13 import prefect.tasks
14 from prefect.utilities.tasks import task, tags, unmapped
15
16 import prefect.serialization
17
18 import prefect.agent
19
20 from ._version import get_versions
21
22 __version__ = get_versions()["version"] # type: ignore
23 del get_versions
24
25 try:
26 import signal as _signal
27 from ._siginfo import sig_handler as _sig_handler
28
29 _signal.signal(29, _sig_handler)
30 except:
31 pass
32
[end of src/prefect/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/__init__.py b/src/prefect/__init__.py
--- a/src/prefect/__init__.py
+++ b/src/prefect/__init__.py
@@ -11,6 +11,7 @@
from prefect.core import Task, Flow, Parameter
import prefect.engine
import prefect.tasks
+from prefect.tasks.control_flow import case
from prefect.utilities.tasks import task, tags, unmapped
import prefect.serialization
| {"golden_diff": "diff --git a/src/prefect/__init__.py b/src/prefect/__init__.py\n--- a/src/prefect/__init__.py\n+++ b/src/prefect/__init__.py\n@@ -11,6 +11,7 @@\n from prefect.core import Task, Flow, Parameter\n import prefect.engine\n import prefect.tasks\n+from prefect.tasks.control_flow import case\n from prefect.utilities.tasks import task, tags, unmapped\n \n import prefect.serialization\n", "issue": "Consider promoting `case` to the top level\n## Current behavior\r\n*Please describe how the feature works today*\r\nCurrently, the `case` context manager must be imported from `prefect.tasks.control_flow.case`.\r\n\r\n\r\n\r\n## Proposed behavior\r\n*Please describe your proposed change to the current behavior*\r\nI think we should consider promoting `case` to being importable as `prefect.case`, since it forms a fundamental part of the Python API. Other control flow utilities have \"task-like\" semantics (even if they are called as functions), and it's more appropriate for them to live in a `tasks` submodule. However, like `task`, `Flow`, `tags`, and `unmapped`, I believe `case` represents a significant component of Prefect's Python syntax and warrants top-level availability.\r\n\r\n\r\n\r\n\r\n## Example\r\n*Please give an example of how the enhancement would be useful*\r\n```\r\nfrom prefect import Flow, case\r\n\r\nwith Flow(\"example\"):\r\n with case(is_this_easy, True):\r\n do_stuff()\r\n\r\n with prefect.tasks.control_flow.case(is_this_easy, False):\r\n do_other_stuff()\r\n```\n", "before_files": [{"content": "import prefect.utilities\nfrom prefect.configuration import config\n\nfrom prefect.utilities.context import context\n\nfrom prefect.client import Client\nimport prefect.schedules\nimport prefect.triggers\nimport prefect.environments\n\nfrom prefect.core import Task, Flow, Parameter\nimport prefect.engine\nimport prefect.tasks\nfrom prefect.utilities.tasks import task, tags, unmapped\n\nimport prefect.serialization\n\nimport prefect.agent\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"] # type: ignore\ndel get_versions\n\ntry:\n import signal as _signal\n from ._siginfo import sig_handler as _sig_handler\n\n _signal.signal(29, _sig_handler)\nexcept:\n pass\n", "path": "src/prefect/__init__.py"}]} | 974 | 100 |
gh_patches_debug_20034 | rasdani/github-patches | git_diff | python-discord__bot-790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filters should not apply to staff members
Currently, we have certain channels whitelisted for certain filters, and some filters apply to all members; even Owners cannot post file types that aren't on the whitelist!
Please change this so that absolutely all filters will ignore all staff members. It is not necessary to whitelist staff channels or to keep any other kind of exceptions to the filters once this very simple exception has been added, so please clean up any such exceptions while you're at it.
</issue>
<code>
[start of bot/cogs/antimalware.py]
1 import logging
2
3 from discord import Embed, Message, NotFound
4 from discord.ext.commands import Cog
5
6 from bot.bot import Bot
7 from bot.constants import AntiMalware as AntiMalwareConfig, Channels, URLs
8
9 log = logging.getLogger(__name__)
10
11
12 class AntiMalware(Cog):
13 """Delete messages which contain attachments with non-whitelisted file extensions."""
14
15 def __init__(self, bot: Bot):
16 self.bot = bot
17
18 @Cog.listener()
19 async def on_message(self, message: Message) -> None:
20 """Identify messages with prohibited attachments."""
21 if not message.attachments:
22 return
23
24 embed = Embed()
25 for attachment in message.attachments:
26 filename = attachment.filename.lower()
27 if filename.endswith('.py'):
28 embed.description = (
29 f"It looks like you tried to attach a Python file - please "
30 f"use a code-pasting service such as {URLs.site_schema}{URLs.site_paste}"
31 )
32 break # Other detections irrelevant because we prioritize the .py message.
33 if not filename.endswith(tuple(AntiMalwareConfig.whitelist)):
34 whitelisted_types = ', '.join(AntiMalwareConfig.whitelist)
35 meta_channel = self.bot.get_channel(Channels.meta)
36 embed.description = (
37 f"It looks like you tried to attach a file type that we "
38 f"do not allow. We currently allow the following file "
39 f"types: **{whitelisted_types}**. \n\n Feel free to ask "
40 f"in {meta_channel.mention} if you think this is a mistake."
41 )
42 if embed.description:
43 await message.channel.send(f"Hey {message.author.mention}!", embed=embed)
44
45 # Delete the offending message:
46 try:
47 await message.delete()
48 except NotFound:
49 log.info(f"Tried to delete message `{message.id}`, but message could not be found.")
50
51
52 def setup(bot: Bot) -> None:
53 """Load the AntiMalware cog."""
54 bot.add_cog(AntiMalware(bot))
55
[end of bot/cogs/antimalware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/antimalware.py b/bot/cogs/antimalware.py
--- a/bot/cogs/antimalware.py
+++ b/bot/cogs/antimalware.py
@@ -4,7 +4,7 @@
from discord.ext.commands import Cog
from bot.bot import Bot
-from bot.constants import AntiMalware as AntiMalwareConfig, Channels, URLs
+from bot.constants import AntiMalware as AntiMalwareConfig, Channels, STAFF_ROLES, URLs
log = logging.getLogger(__name__)
@@ -18,7 +18,13 @@
@Cog.listener()
async def on_message(self, message: Message) -> None:
"""Identify messages with prohibited attachments."""
- if not message.attachments:
+ # Return when message don't have attachment and don't moderate DMs
+ if not message.attachments or not message.guild:
+ return
+
+ # Check if user is staff, if is, return
+ # Since we only care that roles exist to iterate over, check for the attr rather than a User/Member instance
+ if hasattr(message.author, "roles") and any(role.id in STAFF_ROLES for role in message.author.roles):
return
embed = Embed()
| {"golden_diff": "diff --git a/bot/cogs/antimalware.py b/bot/cogs/antimalware.py\n--- a/bot/cogs/antimalware.py\n+++ b/bot/cogs/antimalware.py\n@@ -4,7 +4,7 @@\n from discord.ext.commands import Cog\n \n from bot.bot import Bot\n-from bot.constants import AntiMalware as AntiMalwareConfig, Channels, URLs\n+from bot.constants import AntiMalware as AntiMalwareConfig, Channels, STAFF_ROLES, URLs\n \n log = logging.getLogger(__name__)\n \n@@ -18,7 +18,13 @@\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Identify messages with prohibited attachments.\"\"\"\n- if not message.attachments:\n+ # Return when message don't have attachment and don't moderate DMs\n+ if not message.attachments or not message.guild:\n+ return\n+\n+ # Check if user is staff, if is, return\n+ # Since we only care that roles exist to iterate over, check for the attr rather than a User/Member instance\n+ if hasattr(message.author, \"roles\") and any(role.id in STAFF_ROLES for role in message.author.roles):\n return\n \n embed = Embed()\n", "issue": "Filters should not apply to staff members\nCurrently, we have certain channels whitelisted for certain filters and some filters apply to all members, even Owners cannot post filetypes that aren't on the whitelist!\r\n\r\nPlease change this so that absolutely all filters will ignore all staff members. It is not necessary to whitelist staff channels or to keep any other kind of exceptions to the filters once this very simple exception has been added, so please clean up any such exceptions while you're at it.\n", "before_files": [{"content": "import logging\n\nfrom discord import Embed, Message, NotFound\nfrom discord.ext.commands import Cog\n\nfrom bot.bot import Bot\nfrom bot.constants import AntiMalware as AntiMalwareConfig, Channels, URLs\n\nlog = logging.getLogger(__name__)\n\n\nclass AntiMalware(Cog):\n \"\"\"Delete messages which contain attachments with non-whitelisted file extensions.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Identify messages with prohibited attachments.\"\"\"\n if not message.attachments:\n return\n\n embed = Embed()\n for attachment in message.attachments:\n filename = attachment.filename.lower()\n if filename.endswith('.py'):\n embed.description = (\n f\"It looks like you tried to attach a Python file - please \"\n f\"use a code-pasting service such as {URLs.site_schema}{URLs.site_paste}\"\n )\n break # Other detections irrelevant because we prioritize the .py message.\n if not filename.endswith(tuple(AntiMalwareConfig.whitelist)):\n whitelisted_types = ', '.join(AntiMalwareConfig.whitelist)\n meta_channel = self.bot.get_channel(Channels.meta)\n embed.description = (\n f\"It looks like you tried to attach a file type that we \"\n f\"do not allow. We currently allow the following file \"\n f\"types: **{whitelisted_types}**. \\n\\n Feel free to ask \"\n f\"in {meta_channel.mention} if you think this is a mistake.\"\n )\n if embed.description:\n await message.channel.send(f\"Hey {message.author.mention}!\", embed=embed)\n\n # Delete the offending message:\n try:\n await message.delete()\n except NotFound:\n log.info(f\"Tried to delete message `{message.id}`, but message could not be found.\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the AntiMalware cog.\"\"\"\n bot.add_cog(AntiMalware(bot))\n", "path": "bot/cogs/antimalware.py"}]} | 1,177 | 278 |
gh_patches_debug_265 | rasdani/github-patches | git_diff | Nitrate__Nitrate-603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade celery to 4.3.0
As per title. Remove `skipIf` from test `test_uses_celery`.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'beautifulsoup4 >= 4.1.1',
17 'django >= 2.1,<3.0',
18 'django-contrib-comments == 1.9.1',
19 'django-tinymce == 2.7.0',
20 'django-uuslug == 1.1.8',
21 'html2text',
22 'odfpy >= 0.9.6',
23 'python-bugzilla',
24 'xmltodict',
25 'kobo == 0.9.0'
26 ]
27
28 extras_require = {
29 'mysql': ['mysqlclient >= 1.2.3'],
30 'pgsql': ['psycopg2 == 2.7.5'],
31
32 # Required for tcms.auth.backends.KerberosBackend
33 'krbauth': [
34 'kerberos == 1.2.5'
35 ],
36
37 # Packages for building documentation
38 'docs': [
39 'Sphinx >= 1.1.2',
40 'sphinx_rtd_theme',
41 ],
42
43 # Necessary packages for running tests
44 'tests': [
45 'beautifulsoup4',
46 'coverage',
47 'factory_boy',
48 'flake8',
49 'pytest',
50 'pytest-cov',
51 'pytest-django',
52 ],
53
54 # Contain tools that assists the development
55 'devtools': [
56 'django-debug-toolbar',
57 'tox',
58 'django-extensions',
59 'pygraphviz',
60 ],
61
62 # Required packages required to run async tasks
63 'async': [
64 'celery == 4.2.0',
65 ],
66
67 'multiauth': [
68 'social-auth-app-django == 3.1.0',
69 ]
70 }
71
72 setup(
73 name='nitrate-tcms',
74 version=pkg_version,
75 description='A full-featured Test Case Management System',
76 long_description=get_long_description(),
77 author='Nitrate Team',
78 maintainer='Chenxiong Qi',
79 maintainer_email='[email protected]',
80 url='https://github.com/Nitrate/Nitrate/',
81 license='GPLv2+',
82 keywords='test case',
83 install_requires=install_requires,
84 extras_require=extras_require,
85 python_requires='>=3.6',
86 package_dir={'': 'src'},
87 packages=find_packages('src', exclude=['test*']),
88 include_package_data=True,
89 zip_safe=False,
90 classifiers=[
91 'Framework :: Django',
92 'Framework :: Django :: 2.0',
93 'Framework :: Django :: 2.1',
94 'Framework :: Django :: 2.2',
95 'Intended Audience :: Developers',
96 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
97 'Programming Language :: Python :: 3',
98 'Programming Language :: Python :: 3.6',
99 'Programming Language :: Python :: 3.7',
100 'Programming Language :: Python :: 3 :: Only',
101 'Topic :: Software Development :: Quality Assurance',
102 'Topic :: Software Development :: Testing',
103 ],
104 project_urls={
105 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
106 'Source Code': 'https://github.com/Nitrate/Nitrate',
107 'Documentation': 'https://nitrate.readthedocs.io/',
108 },
109 )
110
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
# Required packages required to run async tasks
'async': [
- 'celery == 4.2.0',
+ 'celery == 4.4.2',
],
'multiauth': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,7 @@\n \n # Required packages required to run async tasks\n 'async': [\n- 'celery == 4.2.0',\n+ 'celery == 4.4.2',\n ],\n \n 'multiauth': [\n", "issue": "Upgrade celery to 4.3.0\nAs per title. Remove `skipIf` from test `test_uses_celery`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.1,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]} | 1,563 | 85 |
gh_patches_debug_1675 | rasdani/github-patches | git_diff | translate__pootle-4882 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make `pootle webpack` not require system checks
`pootle webpack` fails if, e.g., the DB is not set up correctly. It would be helpful if it didn't.
</issue>
<code>
[start of pootle/apps/pootle_app/management/commands/webpack.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import os
10 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
11 import subprocess
12 import sys
13
14 from django.conf import settings
15 from django.core.management.base import BaseCommand, CommandError
16
17 from pootle_misc.baseurl import l
18
19
20 class Command(BaseCommand):
21 help = 'Builds and bundles static assets using webpack'
22
23 def add_arguments(self, parser):
24 parser.add_argument(
25 '--dev',
26 action='store_true',
27 dest='dev',
28 default=False,
29 help='Enable development builds and watch for changes.',
30 )
31 parser.add_argument(
32 '--nowatch',
33 action='store_false',
34 dest='watch',
35 default=True,
36 help='Disable watching for changes.',
37 )
38 parser.add_argument(
39 '--progress',
40 action='store_true',
41 default=False,
42 help='Show progress (implied if --dev is present).',
43 )
44 parser.add_argument(
45 '--extra',
46 action='append',
47 default=[],
48 help='Additional options to pass to the JavaScript webpack tool.',
49 )
50
51 def handle(self, **options):
52 default_static_dir = os.path.join(settings.WORKING_DIR, 'static')
53 custom_static_dirs = filter(lambda x: x != default_static_dir,
54 settings.STATICFILES_DIRS)
55 default_js_dir = os.path.join(default_static_dir, 'js')
56
57 webpack_config_file = os.path.join(default_js_dir, 'webpack.config.js')
58
59 webpack_bin = os.path.join(default_js_dir, 'node_modules/.bin/webpack')
60 if os.name == 'nt':
61 webpack_bin = '%s.cmd' % webpack_bin
62
63 webpack_progress = (
64 '--progress' if options['progress'] or options['dev'] else ''
65 )
66 webpack_colors = '--colors' if not options['no_color'] else ''
67
68 webpack_args = [webpack_bin, '--config=%s' % webpack_config_file]
69 if webpack_progress:
70 webpack_args.append(webpack_progress)
71 if webpack_colors:
72 webpack_args.append(webpack_colors)
73
74 if options['dev']:
75 watch = '--watch' if options['watch'] else ''
76 webpack_args.extend([watch, '--display-error-details'])
77 else:
78 os.environ['NODE_ENV'] = 'production'
79 webpack_args.append("--bail")
80
81 webpack_args.extend(options['extra'])
82
83 static_base = l(settings.STATIC_URL)
84 suffix = 'js/' if static_base.endswith('/') else '/js/'
85 os.environ['WEBPACK_PUBLIC_PATH'] = static_base + suffix
86
87 if custom_static_dirs:
88 # XXX: review this for css
89 # Append `js/` so that it's not necessary to reference it from the
90 # `webpack.config.js` file
91 custom_static_dirs = map(lambda x: os.path.join(x, 'js/'),
92 custom_static_dirs)
93 os.environ['WEBPACK_ROOT'] = ':'.join(custom_static_dirs)
94
95 try:
96 subprocess.call(webpack_args)
97 except OSError:
98 raise CommandError(
99 'webpack executable not found.\n'
100 'Make sure to install it by running '
101 '`cd %s && npm install`' % default_js_dir
102 )
103 sys.exit(0)
104
[end of pootle/apps/pootle_app/management/commands/webpack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/management/commands/webpack.py b/pootle/apps/pootle_app/management/commands/webpack.py
--- a/pootle/apps/pootle_app/management/commands/webpack.py
+++ b/pootle/apps/pootle_app/management/commands/webpack.py
@@ -19,6 +19,7 @@
class Command(BaseCommand):
help = 'Builds and bundles static assets using webpack'
+ requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/webpack.py b/pootle/apps/pootle_app/management/commands/webpack.py\n--- a/pootle/apps/pootle_app/management/commands/webpack.py\n+++ b/pootle/apps/pootle_app/management/commands/webpack.py\n@@ -19,6 +19,7 @@\n \n class Command(BaseCommand):\n help = 'Builds and bundles static assets using webpack'\n+ requires_system_checks = False\n \n def add_arguments(self, parser):\n parser.add_argument(\n", "issue": "Make `pootle webpack` not require system checks\n`pootle webpack` fails if eg the db is not set up/correctly. It would be helpful if it didnt\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nimport subprocess\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_misc.baseurl import l\n\n\nclass Command(BaseCommand):\n help = 'Builds and bundles static assets using webpack'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--dev',\n action='store_true',\n dest='dev',\n default=False,\n help='Enable development builds and watch for changes.',\n )\n parser.add_argument(\n '--nowatch',\n action='store_false',\n dest='watch',\n default=True,\n help='Disable watching for changes.',\n )\n parser.add_argument(\n '--progress',\n action='store_true',\n default=False,\n help='Show progress (implied if --dev is present).',\n )\n parser.add_argument(\n '--extra',\n action='append',\n default=[],\n help='Additional options to pass to the JavaScript webpack tool.',\n )\n\n def handle(self, **options):\n default_static_dir = os.path.join(settings.WORKING_DIR, 'static')\n custom_static_dirs = filter(lambda x: x != default_static_dir,\n settings.STATICFILES_DIRS)\n default_js_dir = os.path.join(default_static_dir, 'js')\n\n webpack_config_file = os.path.join(default_js_dir, 'webpack.config.js')\n\n webpack_bin = os.path.join(default_js_dir, 'node_modules/.bin/webpack')\n if os.name == 'nt':\n webpack_bin = '%s.cmd' % webpack_bin\n\n webpack_progress = (\n '--progress' if options['progress'] or options['dev'] else ''\n )\n webpack_colors = '--colors' if not options['no_color'] else ''\n\n webpack_args = [webpack_bin, '--config=%s' % webpack_config_file]\n if webpack_progress:\n webpack_args.append(webpack_progress)\n if webpack_colors:\n webpack_args.append(webpack_colors)\n\n if options['dev']:\n watch = '--watch' if options['watch'] else ''\n webpack_args.extend([watch, '--display-error-details'])\n else:\n os.environ['NODE_ENV'] = 'production'\n webpack_args.append(\"--bail\")\n\n webpack_args.extend(options['extra'])\n\n static_base = l(settings.STATIC_URL)\n suffix = 'js/' if static_base.endswith('/') else '/js/'\n os.environ['WEBPACK_PUBLIC_PATH'] = static_base + suffix\n\n if custom_static_dirs:\n # XXX: review this for css\n # Append `js/` so that it's not necessary to reference it from the\n # `webpack.config.js` file\n custom_static_dirs = map(lambda x: os.path.join(x, 'js/'),\n custom_static_dirs)\n os.environ['WEBPACK_ROOT'] = ':'.join(custom_static_dirs)\n\n try:\n subprocess.call(webpack_args)\n except OSError:\n raise CommandError(\n 'webpack executable not found.\\n'\n 'Make sure to install it by 
running '\n '`cd %s && npm install`' % default_js_dir\n )\n sys.exit(0)\n", "path": "pootle/apps/pootle_app/management/commands/webpack.py"}]} | 1,546 | 125 |
gh_patches_debug_16748 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`val_split` + `overfit_batches` gives infinite recursion error
## 🐛 Bug
`val_split` + `overfit_batches` gives infinite recursion error on master
</issue>
<code>
[start of flash/core/data/splits.py]
1 from typing import Any, List
2
3 import numpy as np
4 from pytorch_lightning.utilities.exceptions import MisconfigurationException
5 from torch.utils.data import Dataset
6
7
8 class SplitDataset(Dataset):
9 """
10 SplitDataset is used to create Dataset Subset using indices.
11
12 Args:
13
14 dataset: A dataset to be splitted
15 indices: List of indices to expose from the dataset
16 use_duplicated_indices: Wether to allow duplicated indices.
17
18 Example::
19
20 split_ds = SplitDataset(dataset, indices=[10, 14, 25])
21
22 split_ds = SplitDataset(dataset, indices=[10, 10, 10, 14, 25], use_duplicated_indices=True)
23
24 """
25
26 _INTERNAL_KEYS = ("dataset", "indices", "data")
27
28 def __init__(self, dataset: Any, indices: List[int] = [], use_duplicated_indices: bool = False) -> None:
29 if not isinstance(indices, list):
30 raise MisconfigurationException("indices should be a list")
31
32 if use_duplicated_indices:
33 indices = list(indices)
34 else:
35 indices = list(np.unique(indices))
36
37 if np.max(indices) >= len(dataset) or np.min(indices) < 0:
38 raise MisconfigurationException(f"`indices` should be within [0, {len(dataset) -1}].")
39
40 self.dataset = dataset
41 self.indices = indices
42
43 def __getattr__(self, key: str):
44 if key in self._INTERNAL_KEYS:
45 return getattr(self, key)
46 return getattr(self.dataset, key)
47
48 def __setattr__(self, name: str, value: Any) -> None:
49 if name in self._INTERNAL_KEYS:
50 self.__dict__[name] = value
51 else:
52 setattr(self.dataset, name, value)
53
54 def __getitem__(self, index: int) -> Any:
55 return self.dataset[self.indices[index]]
56
57 def __len__(self) -> int:
58 return len(self.indices)
59
[end of flash/core/data/splits.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/core/data/splits.py b/flash/core/data/splits.py
--- a/flash/core/data/splits.py
+++ b/flash/core/data/splits.py
@@ -13,7 +13,7 @@
dataset: A dataset to be splitted
indices: List of indices to expose from the dataset
- use_duplicated_indices: Wether to allow duplicated indices.
+ use_duplicated_indices: Whether to allow duplicated indices.
Example::
@@ -41,9 +41,9 @@
self.indices = indices
def __getattr__(self, key: str):
- if key in self._INTERNAL_KEYS:
- return getattr(self, key)
- return getattr(self.dataset, key)
+ if key not in self._INTERNAL_KEYS:
+ return self.dataset.__getattribute__(key)
+ raise AttributeError
def __setattr__(self, name: str, value: Any) -> None:
if name in self._INTERNAL_KEYS:
| {"golden_diff": "diff --git a/flash/core/data/splits.py b/flash/core/data/splits.py\n--- a/flash/core/data/splits.py\n+++ b/flash/core/data/splits.py\n@@ -13,7 +13,7 @@\n \n dataset: A dataset to be splitted\n indices: List of indices to expose from the dataset\n- use_duplicated_indices: Wether to allow duplicated indices.\n+ use_duplicated_indices: Whether to allow duplicated indices.\n \n Example::\n \n@@ -41,9 +41,9 @@\n self.indices = indices\n \n def __getattr__(self, key: str):\n- if key in self._INTERNAL_KEYS:\n- return getattr(self, key)\n- return getattr(self.dataset, key)\n+ if key not in self._INTERNAL_KEYS:\n+ return self.dataset.__getattribute__(key)\n+ raise AttributeError\n \n def __setattr__(self, name: str, value: Any) -> None:\n if name in self._INTERNAL_KEYS:\n", "issue": "`val_split` + `overfit_batches` gives infinite recursion error\n## \ud83d\udc1b Bug\r\n\r\n`val_split` + `overfit_batches` gives infinite recursion error on master\n", "before_files": [{"content": "from typing import Any, List\n\nimport numpy as np\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Dataset\n\n\nclass SplitDataset(Dataset):\n \"\"\"\n SplitDataset is used to create Dataset Subset using indices.\n\n Args:\n\n dataset: A dataset to be splitted\n indices: List of indices to expose from the dataset\n use_duplicated_indices: Wether to allow duplicated indices.\n\n Example::\n\n split_ds = SplitDataset(dataset, indices=[10, 14, 25])\n\n split_ds = SplitDataset(dataset, indices=[10, 10, 10, 14, 25], use_duplicated_indices=True)\n\n \"\"\"\n\n _INTERNAL_KEYS = (\"dataset\", \"indices\", \"data\")\n\n def __init__(self, dataset: Any, indices: List[int] = [], use_duplicated_indices: bool = False) -> None:\n if not isinstance(indices, list):\n raise MisconfigurationException(\"indices should be a list\")\n\n if use_duplicated_indices:\n indices = list(indices)\n else:\n indices = list(np.unique(indices))\n\n if np.max(indices) >= len(dataset) or np.min(indices) < 0:\n raise MisconfigurationException(f\"`indices` should be within [0, {len(dataset) -1}].\")\n\n self.dataset = dataset\n self.indices = indices\n\n def __getattr__(self, key: str):\n if key in self._INTERNAL_KEYS:\n return getattr(self, key)\n return getattr(self.dataset, key)\n\n def __setattr__(self, name: str, value: Any) -> None:\n if name in self._INTERNAL_KEYS:\n self.__dict__[name] = value\n else:\n setattr(self.dataset, name, value)\n\n def __getitem__(self, index: int) -> Any:\n return self.dataset[self.indices[index]]\n\n def __len__(self) -> int:\n return len(self.indices)\n", "path": "flash/core/data/splits.py"}]} | 1,117 | 217 |
gh_patches_debug_7021 | rasdani/github-patches | git_diff | zulip__zulip-26839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update documentation on previews permissions
The help center should be updated following #27783.
</issue>
<code>
[start of zerver/lib/url_redirects.py]
1 from dataclasses import dataclass
2 from typing import List
3
4
5 @dataclass
6 class URLRedirect:
7 old_url: str
8 new_url: str
9
10
11 API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
12 # Add URL redirects for REST API documentation here:
13 URLRedirect("/api/delete-stream", "/api/archive-stream"),
14 ]
15
16 POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
17 # Add URL redirects for policy documentation here:
18 URLRedirect("/privacy/", "/policies/privacy"),
19 URLRedirect("/terms/", "/policies/terms"),
20 ]
21
22 HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
23 # Add URL redirects for help center documentation here:
24 URLRedirect("/help/pm-mention-alert-notifications", "/help/dm-mention-alert-notifications"),
25 URLRedirect("/help/restrict-private-messages", "/help/restrict-direct-messages"),
26 URLRedirect("/help/reading-pms", "/help/reading-dms"),
27 URLRedirect("/help/private-messages", "/help/direct-messages"),
28 URLRedirect("/help/configure-who-can-edit-topics", "/help/restrict-moving-messages"),
29 URLRedirect(
30 "/help/configure-message-editing-and-deletion",
31 "/help/restrict-message-editing-and-deletion",
32 ),
33 URLRedirect("/help/restrict-visibility-of-email-addresses", "/help/configure-email-visibility"),
34 URLRedirect("/help/change-default-view", "/help/configure-default-view"),
35 URLRedirect("/help/recent-topics", "/help/recent-conversations"),
36 URLRedirect(
37 "/help/add-custom-profile-fields",
38 "/help/custom-profile-fields",
39 ),
40 URLRedirect(
41 "/help/enable-enter-to-send",
42 "/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
43 ),
44 URLRedirect(
45 "/help/change-the-default-language-for-your-organization",
46 "/help/configure-organization-language",
47 ),
48 URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
49 URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
50 URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
51 URLRedirect("/help/add-an-alert-word", "/help/dm-mention-alert-notifications#alert-words"),
52 URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
53 URLRedirect(
54 "/help/troubleshooting-desktop-notifications",
55 "/help/desktop-notifications#troubleshooting-desktop-notifications",
56 ),
57 URLRedirect(
58 "/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
59 ),
60 URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
61 URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
62 # The `help/about-streams-and-topics` redirect is particularly important,
63 # because the old URL appears in links from Welcome Bot messages.
64 URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
65 URLRedirect("/help/community-topic-edits", "/help/restrict-moving-messages"),
66 URLRedirect(
67 "/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
68 ),
69 URLRedirect(
70 "/help/configure-who-can-add-custom-emoji",
71 "/help/custom-emoji#change-who-can-add-custom-emoji",
72 ),
73 URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
74 URLRedirect("/help/night-mode", "/help/dark-theme"),
75 URLRedirect("/help/enable-emoticon-translations", "/help/configure-emoticon-translations"),
76 URLRedirect("/help/web-public-streams", "/help/public-access-option"),
77 URLRedirect("/help/starting-a-new-private-thread", "/help/starting-a-new-direct-message"),
78 URLRedirect("/help/edit-or-delete-a-message", "/help/delete-a-message"),
79 URLRedirect("/help/start-a-new-topic", "/help/starting-a-new-topic"),
80 URLRedirect("/help/configure-default-view", "/help/configure-home-view"),
81 URLRedirect("/help/reading-topics", "/help/reading-conversations"),
82 URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
83 ]
84
85 LANDING_PAGE_REDIRECTS = [
86 # Add URL redirects for corporate landing pages here.
87 URLRedirect("/new-user/", "/hello/"),
88 URLRedirect("/developer-community/", "/development-community"),
89 URLRedirect("/for/companies/", "/for/business"),
90 URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
91 ]
92
93 DOCUMENTATION_REDIRECTS = (
94 API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
95 )
96
[end of zerver/lib/url_redirects.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -80,6 +80,7 @@
URLRedirect("/help/configure-default-view", "/help/configure-home-view"),
URLRedirect("/help/reading-topics", "/help/reading-conversations"),
URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
+ URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
]
LANDING_PAGE_REDIRECTS = [
| {"golden_diff": "diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py\n--- a/zerver/lib/url_redirects.py\n+++ b/zerver/lib/url_redirects.py\n@@ -80,6 +80,7 @@\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n+ URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n ]\n \n LANDING_PAGE_REDIRECTS = [\n", "issue": "Update documentation on previews permissions\nThe help center should be updated following #27783.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n \"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", 
\"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}]} | 1,761 | 138 |
gh_patches_debug_32222 | rasdani/github-patches | git_diff | explosion__spaCy-3281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect tokenization of dash punctuation in Spanish
In Spanish text, the conventions for using dashes and em-dashes as punctuation seems to be considerably different than in English. Spacy often does not tokenize the dash or em-dash as a separate token, instead keeping it attached to the closest word.
For example, the Spanish sentence:
—Yo me llamo... –murmuró el niño– Emilio Sánchez Pérez.
English Translation:
"My name is...", murmured the boy, "Emilio Sanchez Perez."
Here, the Spanish dash is used like a comma. The em-dash at the beginning of the sentence is used like a double quote. I believe that the fact that there is no space between the dash and word is throwing off the tokenizer.
The Spanish sentence above is tokenized as:
—Yo
me
llamo
...
–murmuró
el
niño–
Emilio
Sánchez
Pérez
.
I would expect the tokenization to be
—
Yo
me
llamo
...
–
murmuró
el
niño
–
Emilio
Sánchez
Pérez
.
## Your Environment
* **spaCy version:** 2.0.12
* **Platform:** Darwin-18.0.0-x86_64-i386-64bit
* **Python version:** 3.7.0
* **Models:** de, es, en
</issue>
<code>
[start of spacy/lang/punctuation.py]
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY, LIST_ICONS
5 from .char_classes import HYPHENS
6 from .char_classes import CURRENCY, UNITS
7 from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
8
9
10 _prefixes = (
11 ["§", "%", "=", r"\+(?![0-9])"]
12 + LIST_PUNCT
13 + LIST_ELLIPSES
14 + LIST_QUOTES
15 + LIST_CURRENCY
16 + LIST_ICONS
17 )
18
19
20 _suffixes = (
21 LIST_PUNCT
22 + LIST_ELLIPSES
23 + LIST_QUOTES
24 + LIST_ICONS
25 + ["'s", "'S", "’s", "’S"]
26 + [
27 r"(?<=[0-9])\+",
28 r"(?<=°[FfCcKk])\.",
29 r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
30 r"(?<=[0-9])(?:{u})".format(u=UNITS),
31 r"(?<=[0-9{al}{e}(?:{q})])\.".format(al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES),
32 r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
33 ]
34 )
35
36 _infixes = (
37 LIST_ELLIPSES
38 + LIST_ICONS
39 + [
40 r"(?<=[0-9])[+\-\*^](?=[0-9-])",
41 r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
42 r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
43 r'(?<=[{a}])(?:{h})(?=[{a}])'.format(a=ALPHA, h=HYPHENS),
44 r'(?<=[{a}])[:<>=/](?=[{a}])'.format(a=ALPHA),
45 ]
46 )
47
48 TOKENIZER_PREFIXES = _prefixes
49 TOKENIZER_SUFFIXES = _suffixes
50 TOKENIZER_INFIXES = _infixes
51
[end of spacy/lang/punctuation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spacy/lang/punctuation.py b/spacy/lang/punctuation.py
--- a/spacy/lang/punctuation.py
+++ b/spacy/lang/punctuation.py
@@ -1,14 +1,13 @@
# coding: utf8
from __future__ import unicode_literals
-from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY, LIST_ICONS
-from .char_classes import HYPHENS
-from .char_classes import CURRENCY, UNITS
+from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
+from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS
from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
_prefixes = (
- ["§", "%", "=", r"\+(?![0-9])"]
+ ["§", "%", "=", "—", "–", r"\+(?![0-9])"]
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
@@ -22,13 +21,15 @@
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
- + ["'s", "'S", "’s", "’S"]
+ + ["'s", "'S", "’s", "’S", "—", "–"]
+ [
r"(?<=[0-9])\+",
r"(?<=°[FfCcKk])\.",
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
- r"(?<=[0-9{al}{e}(?:{q})])\.".format(al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES),
+ r"(?<=[0-9{al}{e}(?:{q})])\.".format(
+ al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES
+ ),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
]
)
@@ -40,8 +41,8 @@
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
- r'(?<=[{a}])(?:{h})(?=[{a}])'.format(a=ALPHA, h=HYPHENS),
- r'(?<=[{a}])[:<>=/](?=[{a}])'.format(a=ALPHA),
+ r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+ r"(?<=[{a}])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
| {"golden_diff": "diff --git a/spacy/lang/punctuation.py b/spacy/lang/punctuation.py\n--- a/spacy/lang/punctuation.py\n+++ b/spacy/lang/punctuation.py\n@@ -1,14 +1,13 @@\n # coding: utf8\n from __future__ import unicode_literals\n \n-from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY, LIST_ICONS\n-from .char_classes import HYPHENS\n-from .char_classes import CURRENCY, UNITS\n+from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY\n+from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS\n from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA\n \n \n _prefixes = (\n- [\"\u00a7\", \"%\", \"=\", r\"\\+(?![0-9])\"]\n+ [\"\u00a7\", \"%\", \"=\", \"\u2014\", \"\u2013\", r\"\\+(?![0-9])\"]\n + LIST_PUNCT\n + LIST_ELLIPSES\n + LIST_QUOTES\n@@ -22,13 +21,15 @@\n + LIST_ELLIPSES\n + LIST_QUOTES\n + LIST_ICONS\n- + [\"'s\", \"'S\", \"\u2019s\", \"\u2019S\"]\n+ + [\"'s\", \"'S\", \"\u2019s\", \"\u2019S\", \"\u2014\", \"\u2013\"]\n + [\n r\"(?<=[0-9])\\+\",\n r\"(?<=\u00b0[FfCcKk])\\.\",\n r\"(?<=[0-9])(?:{c})\".format(c=CURRENCY),\n r\"(?<=[0-9])(?:{u})\".format(u=UNITS),\n- r\"(?<=[0-9{al}{e}(?:{q})])\\.\".format(al=ALPHA_LOWER, e=r\"%\u00b2\\-\\+\", q=CONCAT_QUOTES),\n+ r\"(?<=[0-9{al}{e}(?:{q})])\\.\".format(\n+ al=ALPHA_LOWER, e=r\"%\u00b2\\-\\+\", q=CONCAT_QUOTES\n+ ),\n r\"(?<=[{au}][{au}])\\.\".format(au=ALPHA_UPPER),\n ]\n )\n@@ -40,8 +41,8 @@\n r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n r\"(?<=[{al}])\\.(?=[{au}])\".format(al=ALPHA_LOWER, au=ALPHA_UPPER),\n r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n- r'(?<=[{a}])(?:{h})(?=[{a}])'.format(a=ALPHA, h=HYPHENS),\n- r'(?<=[{a}])[:<>=/](?=[{a}])'.format(a=ALPHA),\n+ r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n+ r\"(?<=[{a}])[:<>=/](?=[{a}])\".format(a=ALPHA),\n ]\n )\n", "issue": "Incorrect tokenization of dash punctuation in Spanish\nIn Spanish text, the conventions for using dashes and em-dashes as punctuation seems to be considerably different than in English. Spacy often does not tokenize the dash or em-dash as a separate token, instead keeping it attached to the closest word.\r\n\r\nFor example, the Spanish sentence:\r\n\u2014Yo me llamo... \u2013murmur\u00f3 el ni\u00f1o\u2013 Emilio S\u00e1nchez P\u00e9rez.\r\nEnglish Translation:\r\n\"My name is...\", murmured the boy, \"Emilio Sanchez Perez.\"\r\n\r\nHere, the Spanish dash is used like a comma. The em-dash at the beginning of the sentence is used like a double quote. 
I believe that the fact that there is no space between the dash and word is throwing off the tokenizer.\r\n\r\nThe Spanish sentence above is tokenized as:\r\n\u2014Yo\r\nme\r\nllamo\r\n...\r\n\u2013murmur\u00f3\r\nel\r\nni\u00f1o\u2013\r\nEmilio\r\nS\u00e1nchez\r\nP\u00e9rez\r\n.\r\n\r\nI would expect the tokenization to be\r\n\u2014\r\nYo\r\nme\r\nllamo\r\n...\r\n\u2013\r\nmurmur\u00f3\r\nel\r\nni\u00f1o\r\n\u2013\r\nEmilio\r\nS\u00e1nchez\r\nP\u00e9rez\r\n.\r\n\r\n## Your Environment\r\n* **spaCy version:** 2.0.12\r\n* **Platform:** Darwin-18.0.0-x86_64-i386-64bit\r\n* **Python version:** 3.7.0\r\n* **Models:** de, es, en\r\n\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY, LIST_ICONS\nfrom .char_classes import HYPHENS\nfrom .char_classes import CURRENCY, UNITS\nfrom .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA\n\n\n_prefixes = (\n [\"\u00a7\", \"%\", \"=\", r\"\\+(?![0-9])\"]\n + LIST_PUNCT\n + LIST_ELLIPSES\n + LIST_QUOTES\n + LIST_CURRENCY\n + LIST_ICONS\n)\n\n\n_suffixes = (\n LIST_PUNCT\n + LIST_ELLIPSES\n + LIST_QUOTES\n + LIST_ICONS\n + [\"'s\", \"'S\", \"\u2019s\", \"\u2019S\"]\n + [\n r\"(?<=[0-9])\\+\",\n r\"(?<=\u00b0[FfCcKk])\\.\",\n r\"(?<=[0-9])(?:{c})\".format(c=CURRENCY),\n r\"(?<=[0-9])(?:{u})\".format(u=UNITS),\n r\"(?<=[0-9{al}{e}(?:{q})])\\.\".format(al=ALPHA_LOWER, e=r\"%\u00b2\\-\\+\", q=CONCAT_QUOTES),\n r\"(?<=[{au}][{au}])\\.\".format(au=ALPHA_UPPER),\n ]\n)\n\n_infixes = (\n LIST_ELLIPSES\n + LIST_ICONS\n + [\n r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n r\"(?<=[{al}])\\.(?=[{au}])\".format(al=ALPHA_LOWER, au=ALPHA_UPPER),\n r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n r'(?<=[{a}])(?:{h})(?=[{a}])'.format(a=ALPHA, h=HYPHENS),\n r'(?<=[{a}])[:<>=/](?=[{a}])'.format(a=ALPHA),\n ]\n)\n\nTOKENIZER_PREFIXES = _prefixes\nTOKENIZER_SUFFIXES = _suffixes\nTOKENIZER_INFIXES = _infixes\n", "path": "spacy/lang/punctuation.py"}]} | 1,449 | 711 |
gh_patches_debug_47933 | rasdani/github-patches | git_diff | liqd__a4-opin-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
when I log out while I am on my profile page I get a 500
</issue>
<code>
[start of euth/follows/templatetags/follow_tags.py]
1 from django import template
2
3 from .. import models
4
5 register = template.Library()
6
7
8 @register.assignment_tag()
9 def is_following(user, project):
10 return models.Follow.objects.filter(
11 enabled=True,
12 project=project,
13 creator=user
14 ).exists()
15
[end of euth/follows/templatetags/follow_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/follows/templatetags/follow_tags.py b/euth/follows/templatetags/follow_tags.py
--- a/euth/follows/templatetags/follow_tags.py
+++ b/euth/follows/templatetags/follow_tags.py
@@ -7,8 +7,11 @@
@register.assignment_tag()
def is_following(user, project):
- return models.Follow.objects.filter(
- enabled=True,
- project=project,
- creator=user
- ).exists()
+ if not user.is_anonymous():
+ return models.Follow.objects.filter(
+ enabled=True,
+ project=project,
+ creator=user
+ ).exists()
+ else:
+ return False
| {"golden_diff": "diff --git a/euth/follows/templatetags/follow_tags.py b/euth/follows/templatetags/follow_tags.py\n--- a/euth/follows/templatetags/follow_tags.py\n+++ b/euth/follows/templatetags/follow_tags.py\n@@ -7,8 +7,11 @@\n \n @register.assignment_tag()\n def is_following(user, project):\n- return models.Follow.objects.filter(\n- enabled=True,\n- project=project,\n- creator=user\n- ).exists()\n+ if not user.is_anonymous():\n+ return models.Follow.objects.filter(\n+ enabled=True,\n+ project=project,\n+ creator=user\n+ ).exists()\n+ else:\n+ return False\n", "issue": "when I log out while I am on my profile page I get a 500\n\n", "before_files": [{"content": "from django import template\n\nfrom .. import models\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef is_following(user, project):\n return models.Follow.objects.filter(\n enabled=True,\n project=project,\n creator=user\n ).exists()\n", "path": "euth/follows/templatetags/follow_tags.py"}]} | 650 | 172 |
gh_patches_debug_10257 | rasdani/github-patches | git_diff | getsentry__sentry-48159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to disable issue alert notifications for specific projects on old sentry accounts.
### Environment
SaaS (https://sentry.io/)
### Version
_No response_
### Link
_No response_
### DSN
_No response_
### Steps to Reproduce
In old accounts the default option for Fine tuning issue alert notifications was `--`, which no longer exists. (User Settings -> Notifications -> Issue alert notifications -> Fine Tune).
It seems to have been replaced with "Default" and in cases a user wants to disable the notifications for a project that currently has `--` selected, the changes to "Off" is not saved and notifications continue to be sent.
There is no workaround as turning issue alerts notifications to OFF completely disable the notifications for all projects in all organisation, not allowing to enable for projects that man wants to receive notifications from.
As a side note: This design is not optimal as having "On" and "Default" is redundant -> you can only set default to On, and when it is off there are no options available (Default(Off) does not exist).
There is a recoding available in this [internal ticket (contains personal information)](https://sentry.zendesk.com/agent/tickets/84240)
### Expected Result
Changing the notification from `--` to `Off` saves and stop sending emails.
### Actual Result
The change is not saved.
</issue>
<code>
[start of src/sentry/incidents/endpoints/bases.py]
1 from rest_framework.exceptions import PermissionDenied
2 from rest_framework.request import Request
3
4 from sentry import features
5 from sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint
6 from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
7 from sentry.api.exceptions import ResourceDoesNotExist
8 from sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction
9
10
11 class ProjectAlertRuleEndpoint(ProjectEndpoint):
12 permission_classes = (ProjectAlertRulePermission,)
13
14 def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):
15 args, kwargs = super().convert_args(request, *args, **kwargs)
16 project = kwargs["project"]
17
18 if not features.has("organizations:incidents", project.organization, actor=request.user):
19 raise ResourceDoesNotExist
20
21 if not request.access.has_project_access(project):
22 raise PermissionDenied
23
24 try:
25 kwargs["alert_rule"] = AlertRule.objects.get(
26 snuba_query__subscriptions__project=project, id=alert_rule_id
27 )
28 except AlertRule.DoesNotExist:
29 raise ResourceDoesNotExist
30
31 return args, kwargs
32
33
34 class OrganizationAlertRuleEndpoint(OrganizationEndpoint):
35 permission_classes = (OrganizationAlertRulePermission,)
36
37 def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):
38 args, kwargs = super().convert_args(request, *args, **kwargs)
39 organization = kwargs["organization"]
40
41 # Allow orgs that have downgraded plans to delete metric alerts
42 if request.method != "DELETE" and not features.has(
43 "organizations:incidents", organization, actor=request.user
44 ):
45 raise ResourceDoesNotExist
46
47 try:
48 kwargs["alert_rule"] = AlertRule.objects.get(
49 organization=organization, id=alert_rule_id
50 )
51 except AlertRule.DoesNotExist:
52 raise ResourceDoesNotExist
53
54 return args, kwargs
55
56
57 class OrganizationAlertRuleTriggerEndpoint(OrganizationAlertRuleEndpoint):
58 def convert_args(self, request: Request, alert_rule_trigger_id, *args, **kwargs):
59 args, kwargs = super().convert_args(request, *args, **kwargs)
60 organization = kwargs["organization"]
61 alert_rule = kwargs["alert_rule"]
62
63 if not features.has("organizations:incidents", organization, actor=request.user):
64 raise ResourceDoesNotExist
65
66 try:
67 kwargs["alert_rule_trigger"] = AlertRuleTrigger.objects.get(
68 alert_rule=alert_rule, id=alert_rule_trigger_id
69 )
70 except AlertRuleTrigger.DoesNotExist:
71 raise ResourceDoesNotExist
72
73 return args, kwargs
74
75
76 class OrganizationAlertRuleTriggerActionEndpoint(OrganizationAlertRuleTriggerEndpoint):
77 def convert_args(self, request: Request, alert_rule_trigger_action_id, *args, **kwargs):
78 args, kwargs = super().convert_args(request, *args, **kwargs)
79 organization = kwargs["organization"]
80 trigger = kwargs["alert_rule_trigger"]
81
82 if not features.has("organizations:incidents", organization, actor=request.user):
83 raise ResourceDoesNotExist
84
85 try:
86 kwargs["alert_rule_trigger_action"] = AlertRuleTriggerAction.objects.get(
87 alert_rule_trigger=trigger, id=alert_rule_trigger_action_id
88 )
89 except AlertRuleTriggerAction.DoesNotExist:
90 raise ResourceDoesNotExist
91
92 return args, kwargs
93
[end of src/sentry/incidents/endpoints/bases.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/incidents/endpoints/bases.py b/src/sentry/incidents/endpoints/bases.py
--- a/src/sentry/incidents/endpoints/bases.py
+++ b/src/sentry/incidents/endpoints/bases.py
@@ -15,7 +15,10 @@
args, kwargs = super().convert_args(request, *args, **kwargs)
project = kwargs["project"]
- if not features.has("organizations:incidents", project.organization, actor=request.user):
+ # Allow orgs that have downgraded plans to delete metric alerts
+ if request.method != "DELETE" and not features.has(
+ "organizations:incidents", project.organization, actor=request.user
+ ):
raise ResourceDoesNotExist
if not request.access.has_project_access(project):
| {"golden_diff": "diff --git a/src/sentry/incidents/endpoints/bases.py b/src/sentry/incidents/endpoints/bases.py\n--- a/src/sentry/incidents/endpoints/bases.py\n+++ b/src/sentry/incidents/endpoints/bases.py\n@@ -15,7 +15,10 @@\n args, kwargs = super().convert_args(request, *args, **kwargs)\n project = kwargs[\"project\"]\n \n- if not features.has(\"organizations:incidents\", project.organization, actor=request.user):\n+ # Allow orgs that have downgraded plans to delete metric alerts\n+ if request.method != \"DELETE\" and not features.has(\n+ \"organizations:incidents\", project.organization, actor=request.user\n+ ):\n raise ResourceDoesNotExist\n \n if not request.access.has_project_access(project):\n", "issue": "Unable to disable issue alert notifications for specific projects on old sentry accounts.\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Version\n\n_No response_\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Steps to Reproduce\n\nIn old accounts the default option for Fine tuning issue alert notifications was `--`, which no longer exists. (User Settings -> Notifications -> Issue alert notifications -> Fine Tune).\r\n\r\nIt seems to have been replaced with \"Default\" and in cases a user wants to disable the notifications for a project that currently has `--` selected, the changes to \"Off\" is not saved and notifications continue to be sent. \r\n\r\nThere is no workaround as turning issue alerts notifications to OFF completely disable the notifications for all projects in all organisation, not allowing to enable for projects that man wants to receive notifications from. \r\n\r\nAs a side note: This design is not optimal as having \"On\" and \"Default\" is redundant -> you can only set default to On, and when it is off there are no options available (Default(Off) does not exist). 
\r\n\r\nThere is a recoding available in this [internal ticket (contains personal information)](https://sentry.zendesk.com/agent/tickets/84240)\n\n### Expected Result\n\nChanging the notification from `--` to `Off` saves and stop sending emails.\n\n### Actual Result\n\nThe change is not saved.\n", "before_files": [{"content": "from rest_framework.exceptions import PermissionDenied\nfrom rest_framework.request import Request\n\nfrom sentry import features\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction\n\n\nclass ProjectAlertRuleEndpoint(ProjectEndpoint):\n permission_classes = (ProjectAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n project = kwargs[\"project\"]\n\n if not features.has(\"organizations:incidents\", project.organization, actor=request.user):\n raise ResourceDoesNotExist\n\n if not request.access.has_project_access(project):\n raise PermissionDenied\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n snuba_query__subscriptions__project=project, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n\n # Allow orgs that have downgraded plans to delete metric alerts\n if request.method != \"DELETE\" and not features.has(\n \"organizations:incidents\", organization, actor=request.user\n ):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n organization=organization, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerEndpoint(OrganizationAlertRuleEndpoint):\n def convert_args(self, request: Request, alert_rule_trigger_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n alert_rule = kwargs[\"alert_rule\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger\"] = AlertRuleTrigger.objects.get(\n alert_rule=alert_rule, id=alert_rule_trigger_id\n )\n except AlertRuleTrigger.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerActionEndpoint(OrganizationAlertRuleTriggerEndpoint):\n def convert_args(self, request: Request, alert_rule_trigger_action_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n trigger = kwargs[\"alert_rule_trigger\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger_action\"] = AlertRuleTriggerAction.objects.get(\n alert_rule_trigger=trigger, id=alert_rule_trigger_action_id\n )\n except AlertRuleTriggerAction.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, 
kwargs\n", "path": "src/sentry/incidents/endpoints/bases.py"}]} | 1,719 | 174 |
gh_patches_debug_5127 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate email to algorithm user that is also an editor when job fails
A tiny improvement request: When an algorithm job fails, the editors and the job creator are emailed (#1018), but there is apparently no check whether the job creator is one of the algorithm editor and would receive an email anyway - I was testing a new algorithm and always received two emails when a job failed.
</issue>
<code>
[start of app/grandchallenge/algorithms/emails.py]
1 from django.conf import settings
2 from django.contrib.sites.models import Site
3 from django.core.mail import send_mail
4
5 from grandchallenge.core.utils.email import send_templated_email
6 from grandchallenge.evaluation.templatetags.evaluation_extras import user_error
7
8
9 def send_permission_request_email(obj):
10 """
11 Emails the editors that someone has requested to view an algorithm.
12
13 Parameters
14 ----------
15 obj:
16 AlgorithmPermissionRequest object containing info on which
17 user requested access to which algorithm.
18 """
19 title = f"[{obj.algorithm.title}] New access request"
20 kwargs = {
21 "user": obj.user,
22 "site": Site.objects.get_current(),
23 "algorithm": obj.algorithm,
24 }
25 for editor in obj.algorithm.editors_group.user_set.all():
26 kwargs["editor"] = editor
27 send_templated_email(
28 title,
29 "algorithms/emails/access_request.html",
30 kwargs,
31 [editor.email],
32 )
33
34
35 def send_permission_granted_email(obj):
36 """
37 Emails the requester that their request has been approved.
38
39 Parameters
40 ----------
41 obj:
42 AlgorithmPermissionRequest object containing info on which
43 user requested access to which algorithm.
44 """
45 title = f"[{obj.algorithm.title}] Access granted"
46 kwargs = {
47 "user": obj.user,
48 "site": Site.objects.get_current(),
49 "algorithm": obj.algorithm,
50 }
51 send_templated_email(
52 title,
53 "algorithms/emails/access_granted.html",
54 kwargs,
55 [obj.user.email],
56 )
57
58
59 def send_permission_denied_email(obj):
60 """
61 Emails the requester that their request has been approved.
62
63 Parameters
64 ----------
65 obj:
66 AlgorithmPermissionRequest object containing info on which
67 user requested access to which algorithm and optionally the
68 reason for rejection.
69 """
70 title = f"[{obj.algorithm.title}] Access denied"
71 kwargs = {
72 "user": obj.user,
73 "site": Site.objects.get_current(),
74 "algorithm": obj.algorithm,
75 "permission_request": obj,
76 }
77 send_templated_email(
78 title,
79 "algorithms/emails/access_denied.html",
80 kwargs,
81 [obj.user.email],
82 )
83
84
85 def send_failed_job_email(job):
86 algorithm = job.algorithm_image.algorithm
87 message = (
88 f"Unfortunately your job for algorithm "
89 f"'{algorithm.title}' failed with an error. "
90 f"The error message is:\n\n"
91 f"{user_error(job.output)}\n\n"
92 f"You may wish to try and correct this, or contact the challenge "
93 f"organizers. The following information may help them:\n"
94 f"User: {job.creator.username}\n"
95 f"Job ID: {job.pk}\n"
96 f"Submission ID: {job.pk}"
97 )
98 recipient_emails = [
99 o.email for o in algorithm.editors_group.user_set.all()
100 ]
101 recipient_emails.append(job.creator.email)
102 for email in recipient_emails:
103 send_mail(
104 subject=(
105 f"[{Site.objects.get_current().domain.lower()}] "
106 f"[{algorithm.title.lower()}] "
107 f"Job Failed"
108 ),
109 message=message,
110 from_email=settings.DEFAULT_FROM_EMAIL,
111 recipient_list=[email],
112 )
113
[end of app/grandchallenge/algorithms/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/algorithms/emails.py b/app/grandchallenge/algorithms/emails.py
--- a/app/grandchallenge/algorithms/emails.py
+++ b/app/grandchallenge/algorithms/emails.py
@@ -99,7 +99,8 @@
o.email for o in algorithm.editors_group.user_set.all()
]
recipient_emails.append(job.creator.email)
- for email in recipient_emails:
+
+ for email in {*recipient_emails}:
send_mail(
subject=(
f"[{Site.objects.get_current().domain.lower()}] "
| {"golden_diff": "diff --git a/app/grandchallenge/algorithms/emails.py b/app/grandchallenge/algorithms/emails.py\n--- a/app/grandchallenge/algorithms/emails.py\n+++ b/app/grandchallenge/algorithms/emails.py\n@@ -99,7 +99,8 @@\n o.email for o in algorithm.editors_group.user_set.all()\n ]\n recipient_emails.append(job.creator.email)\n- for email in recipient_emails:\n+\n+ for email in {*recipient_emails}:\n send_mail(\n subject=(\n f\"[{Site.objects.get_current().domain.lower()}] \"\n", "issue": "Duplicate email to algorithm user that is also an editor when job fails\nA tiny improvement request: When an algorithm job fails, the editors and the job creator are emailed (#1018), but there is apparently no check whether the job creator is one of the algorithm editor and would receive an email anyway - I was testing a new algorithm and always received two emails when a job failed.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import send_mail\n\nfrom grandchallenge.core.utils.email import send_templated_email\nfrom grandchallenge.evaluation.templatetags.evaluation_extras import user_error\n\n\ndef send_permission_request_email(obj):\n \"\"\"\n Emails the editors that someone has requested to view an algorithm.\n\n Parameters\n ----------\n obj:\n AlgorithmPermissionRequest object containing info on which\n user requested access to which algorithm.\n \"\"\"\n title = f\"[{obj.algorithm.title}] New access request\"\n kwargs = {\n \"user\": obj.user,\n \"site\": Site.objects.get_current(),\n \"algorithm\": obj.algorithm,\n }\n for editor in obj.algorithm.editors_group.user_set.all():\n kwargs[\"editor\"] = editor\n send_templated_email(\n title,\n \"algorithms/emails/access_request.html\",\n kwargs,\n [editor.email],\n )\n\n\ndef send_permission_granted_email(obj):\n \"\"\"\n Emails the requester that their request has been approved.\n\n Parameters\n ----------\n obj:\n AlgorithmPermissionRequest object containing info on which\n user requested access to which algorithm.\n \"\"\"\n title = f\"[{obj.algorithm.title}] Access granted\"\n kwargs = {\n \"user\": obj.user,\n \"site\": Site.objects.get_current(),\n \"algorithm\": obj.algorithm,\n }\n send_templated_email(\n title,\n \"algorithms/emails/access_granted.html\",\n kwargs,\n [obj.user.email],\n )\n\n\ndef send_permission_denied_email(obj):\n \"\"\"\n Emails the requester that their request has been approved.\n\n Parameters\n ----------\n obj:\n AlgorithmPermissionRequest object containing info on which\n user requested access to which algorithm and optionally the\n reason for rejection.\n \"\"\"\n title = f\"[{obj.algorithm.title}] Access denied\"\n kwargs = {\n \"user\": obj.user,\n \"site\": Site.objects.get_current(),\n \"algorithm\": obj.algorithm,\n \"permission_request\": obj,\n }\n send_templated_email(\n title,\n \"algorithms/emails/access_denied.html\",\n kwargs,\n [obj.user.email],\n )\n\n\ndef send_failed_job_email(job):\n algorithm = job.algorithm_image.algorithm\n message = (\n f\"Unfortunately your job for algorithm \"\n f\"'{algorithm.title}' failed with an error. \"\n f\"The error message is:\\n\\n\"\n f\"{user_error(job.output)}\\n\\n\"\n f\"You may wish to try and correct this, or contact the challenge \"\n f\"organizers. 
The following information may help them:\\n\"\n f\"User: {job.creator.username}\\n\"\n f\"Job ID: {job.pk}\\n\"\n f\"Submission ID: {job.pk}\"\n )\n recipient_emails = [\n o.email for o in algorithm.editors_group.user_set.all()\n ]\n recipient_emails.append(job.creator.email)\n for email in recipient_emails:\n send_mail(\n subject=(\n f\"[{Site.objects.get_current().domain.lower()}] \"\n f\"[{algorithm.title.lower()}] \"\n f\"Job Failed\"\n ),\n message=message,\n from_email=settings.DEFAULT_FROM_EMAIL,\n recipient_list=[email],\n )\n", "path": "app/grandchallenge/algorithms/emails.py"}]} | 1,549 | 125 |
gh_patches_debug_23994 | rasdani/github-patches | git_diff | jupyter__docker-stacks-2074 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Healthcheck fails when using a custom runtime dir
### What docker image(s) are you using?
scipy-notebook (but applies to all images based on the `base-notebook` image)
### Host OS system
RHEL 8.0
### Host architecture
x86_64
### What Docker command are you running?
The following command DOES work as expected (default runtime dir):
```
docker run --rm -p 8888:8888 --name jupyter quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh
```
The following command does NOT work as expected (customized runtime dir):
```
docker run --rm -p 8888:8888 --name jupyter -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh
```
### How to Reproduce the problem?
1. Start the Jupyter container using the commands above.
2. In another terminal, run the healtcheck script: `docker exec jupyter /etc/jupyter/docker_healthcheck.py`
3. Observe the healthcheck script failing due to server state JSON file(s) not being found.
### Command output
```bash session
$ docker run --rm -p 8888:8888 --name jupyter quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh
$ docker exec jupyter /etc/jupyter/docker_healthcheck.py
b'{"version": "2.12.1"}'
$ docker run --rm -p 8888:8888 --name jupyter -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh
$ docker exec jupyter /etc/jupyter/docker_healthcheck.py
Traceback (most recent call last):
File "/etc/jupyter/docker_healthcheck.py", line 14, in <module>
json_file = next(runtime_dir.glob("*server-*.json"))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
StopIteration
```
### Expected behavior
Healthcheck script to not fail, e.g. to display `b'{"version": "2.12.1"}'`, even with a customized runtime dir.
### Actual behavior
The healthcheck script fails because it cannot find server state JSON files in the hard-coded default runtime dir.
### Anything else?
The problem is that the `/etc/jupyter/docker_healthcheck.py` healtcheck script hard-codes the default runtime directory to search for server JSON state files as below:
https://github.com/jupyter/docker-stacks/blob/fcb20a914ed20e44a96053caf43eef6e12fb4c04/images/base-notebook/docker_healthcheck.py#L13
When this directory is customized for example via `JUPYTER_RUNTIME_DIR`, then the healthcheck script does not work.
The actual problem is when deploying Jupyter containers as services.
The Jupyter images have a default healthcheck configured as below:
https://github.com/jupyter/docker-stacks/blob/fcb20a914ed20e44a96053caf43eef6e12fb4c04/images/base-notebook/Dockerfile#L66-L70
When the healthcheck fails due to a custom runtime dir, the service is restarted continuously.
I think the healthcheck script should use the output of `jupyter --runtime-dir` which respects customizations:
```
$ docker run --rm -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 jupyter --runtime-dir
/home/jovyan/custom-runtime
```
If you agree with the above, I can send a PR with this fix.
### Latest Docker version
- [X] I've updated my Docker version to the latest available, and the issue persists
</issue>
<code>
[start of images/base-notebook/docker_healthcheck.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4 import json
5 import os
6 from pathlib import Path
7
8 import requests
9
10 # Several operations below deliberately don't check for possible errors
11 # As this is a healthcheck, it should succeed or raise an exception on error
12
13 runtime_dir = Path("/home/") / os.environ["NB_USER"] / ".local/share/jupyter/runtime/"
14 json_file = next(runtime_dir.glob("*server-*.json"))
15
16 url = json.loads(json_file.read_bytes())["url"]
17 url = url + "api"
18
19 proxies = {
20 "http": "",
21 "https": "",
22 }
23
24 r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification
25 r.raise_for_status()
26 print(r.content)
27
[end of images/base-notebook/docker_healthcheck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/images/base-notebook/docker_healthcheck.py b/images/base-notebook/docker_healthcheck.py
--- a/images/base-notebook/docker_healthcheck.py
+++ b/images/base-notebook/docker_healthcheck.py
@@ -3,6 +3,7 @@
# Distributed under the terms of the Modified BSD License.
import json
import os
+import subprocess
from pathlib import Path
import requests
@@ -10,7 +11,19 @@
# Several operations below deliberately don't check for possible errors
# As this is a healthcheck, it should succeed or raise an exception on error
-runtime_dir = Path("/home/") / os.environ["NB_USER"] / ".local/share/jupyter/runtime/"
+# Docker runs healtchecks using an exec
+# It uses the default user configured when running the image: root for the case of a custom NB_USER or jovyan for the case of the default image user.
+# We manually change HOME to make `jupyter --runtime-dir` report a correct path
+# More information: <https://github.com/jupyter/docker-stacks/pull/2074#issuecomment-1879778409>
+result = subprocess.run(
+ ["jupyter", "--runtime-dir"],
+ check=True,
+ capture_output=True,
+ text=True,
+ env=dict(os.environ) | {"HOME": "/home/" + os.environ["NB_USER"]},
+)
+runtime_dir = Path(result.stdout.rstrip())
+
json_file = next(runtime_dir.glob("*server-*.json"))
url = json.loads(json_file.read_bytes())["url"]
| {"golden_diff": "diff --git a/images/base-notebook/docker_healthcheck.py b/images/base-notebook/docker_healthcheck.py\n--- a/images/base-notebook/docker_healthcheck.py\n+++ b/images/base-notebook/docker_healthcheck.py\n@@ -3,6 +3,7 @@\n # Distributed under the terms of the Modified BSD License.\n import json\n import os\n+import subprocess\n from pathlib import Path\n \n import requests\n@@ -10,7 +11,19 @@\n # Several operations below deliberately don't check for possible errors\n # As this is a healthcheck, it should succeed or raise an exception on error\n \n-runtime_dir = Path(\"/home/\") / os.environ[\"NB_USER\"] / \".local/share/jupyter/runtime/\"\n+# Docker runs healtchecks using an exec\n+# It uses the default user configured when running the image: root for the case of a custom NB_USER or jovyan for the case of the default image user.\n+# We manually change HOME to make `jupyter --runtime-dir` report a correct path\n+# More information: <https://github.com/jupyter/docker-stacks/pull/2074#issuecomment-1879778409>\n+result = subprocess.run(\n+ [\"jupyter\", \"--runtime-dir\"],\n+ check=True,\n+ capture_output=True,\n+ text=True,\n+ env=dict(os.environ) | {\"HOME\": \"/home/\" + os.environ[\"NB_USER\"]},\n+)\n+runtime_dir = Path(result.stdout.rstrip())\n+\n json_file = next(runtime_dir.glob(\"*server-*.json\"))\n \n url = json.loads(json_file.read_bytes())[\"url\"]\n", "issue": "[BUG] Healthcheck fails when using a custom runtime dir\n### What docker image(s) are you using?\r\n\r\nscipy-notebook (but applies to all images based on the `base-notebook` image)\r\n\r\n### Host OS system\r\n\r\nRHEL 8.0\r\n\r\n### Host architecture\r\n\r\nx86_64\r\n\r\n### What Docker command are you running?\r\n\r\nThe following command DOES work as expected (default runtime dir):\r\n```\r\ndocker run --rm -p 8888:8888 --name jupyter quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh\r\n```\r\n\r\nThe following command does NOT work as expected (customized runtime dir):\r\n```\r\ndocker run --rm -p 8888:8888 --name jupyter -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh\r\n```\r\n\r\n### How to Reproduce the problem?\r\n\r\n1. Start the Jupyter container using the commands above.\r\n2. In another terminal, run the healtcheck script: `docker exec jupyter /etc/jupyter/docker_healthcheck.py`\r\n3. Observe the healthcheck script failing due to server state JSON file(s) not being found.\r\n\r\n### Command output\r\n\r\n```bash session\r\n$ docker run --rm -p 8888:8888 --name jupyter quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh\r\n$ docker exec jupyter /etc/jupyter/docker_healthcheck.py\r\nb'{\"version\": \"2.12.1\"}'\r\n\r\n$ docker run --rm -p 8888:8888 --name jupyter -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 start-notebook.sh\r\n$ docker exec jupyter /etc/jupyter/docker_healthcheck.py\r\nTraceback (most recent call last):\r\n File \"/etc/jupyter/docker_healthcheck.py\", line 14, in <module>\r\n json_file = next(runtime_dir.glob(\"*server-*.json\"))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nStopIteration\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nHealthcheck script to not fail, e.g. 
to display `b'{\"version\": \"2.12.1\"}'`, even with a customized runtime dir.\r\n\r\n### Actual behavior\r\n\r\nThe healthcheck script fails because it cannot find server state JSON files in the hard-coded default runtime dir.\r\n\r\n### Anything else?\r\n\r\nThe problem is that the `/etc/jupyter/docker_healthcheck.py` healtcheck script hard-codes the default runtime directory to search for server JSON state files as below:\r\nhttps://github.com/jupyter/docker-stacks/blob/fcb20a914ed20e44a96053caf43eef6e12fb4c04/images/base-notebook/docker_healthcheck.py#L13\r\n\r\nWhen this directory is customized for example via `JUPYTER_RUNTIME_DIR`, then the healthcheck script does not work.\r\n\r\nThe actual problem is when deploying Jupyter containers as services.\r\nThe Jupyter images have a default healthcheck configured as below:\r\nhttps://github.com/jupyter/docker-stacks/blob/fcb20a914ed20e44a96053caf43eef6e12fb4c04/images/base-notebook/Dockerfile#L66-L70\r\n\r\nWhen the healthcheck fails due to a custom runtime dir, the service is restarted continuously.\r\n\r\nI think the healthcheck script should use the output of `jupyter --runtime-dir` which respects customizations:\r\n```\r\n$ docker run --rm -e JUPYTER_RUNTIME_DIR=/home/jovyan/custom-runtime quay.io/jupyter/scipy-notebook:2023-12-25 jupyter --runtime-dir\r\n/home/jovyan/custom-runtime\r\n```\r\n\r\nIf you agree with the above, I can send a PR with this fix.\r\n\r\n### Latest Docker version\r\n\r\n- [X] I've updated my Docker version to the latest available, and the issue persists\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport json\nimport os\nfrom pathlib import Path\n\nimport requests\n\n# Several operations below deliberately don't check for possible errors\n# As this is a healthcheck, it should succeed or raise an exception on error\n\nruntime_dir = Path(\"/home/\") / os.environ[\"NB_USER\"] / \".local/share/jupyter/runtime/\"\njson_file = next(runtime_dir.glob(\"*server-*.json\"))\n\nurl = json.loads(json_file.read_bytes())[\"url\"]\nurl = url + \"api\"\n\nproxies = {\n \"http\": \"\",\n \"https\": \"\",\n}\n\nr = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\nr.raise_for_status()\nprint(r.content)\n", "path": "images/base-notebook/docker_healthcheck.py"}]} | 1,675 | 339 |
gh_patches_debug_4691 | rasdani/github-patches | git_diff | mindsdb__lightwood-608 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
:wrench: Colored terminal output
## Task
Modify the lightwood [logger class](https://github.com/mindsdb/lightwood/blob/stable/lightwood/helpers/log.py) so that its output is colored, depending on the log level. Color scheme is not set in stone, but should be something that makes sense, e.g.: red for `CRITICAL`, orange for `ERROR`, yellow for `WARNING`, green for `INFO`, uncolored for `DEBUG`.
## Steps :male_detective: :female_detective:
- Fork the Lightwood repository, checkout the `staging` branch and from it create a new one.
- Implement the necessary changes. The package to achieve colored logging can be chosen as part of the PR, but we suggest using lightweight alternatives like [colored](https://pypi.org/project/colored/) or [termcolor](https://pypi.org/project/termcolor/).
- Check the logs are using the new color scheme. For this, you can run any of the integration tests, like [`test_boston_housing`](https://github.com/mindsdb/lightwood/blob/stable/tests/integration/basic/test_boston_housing.py), and analyze the output.
- Make the PR and address any comments that reviewers might make.
## Additional rewards :1st_place_medal:
Each documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/
</issue>
<code>
[start of lightwood/helpers/log.py]
1 import logging
2 import os
3
4
5 def initialize_log():
6 pid = os.getpid()
7 logging.basicConfig()
8 log = logging.getLogger(f'lightwood-{pid}')
9 log.setLevel(logging.DEBUG)
10 return log
11
12
13 log = initialize_log()
14
[end of lightwood/helpers/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/helpers/log.py b/lightwood/helpers/log.py
--- a/lightwood/helpers/log.py
+++ b/lightwood/helpers/log.py
@@ -1,10 +1,15 @@
import logging
import os
+import colorlog
def initialize_log():
pid = os.getpid()
- logging.basicConfig()
+
+ handler = colorlog.StreamHandler()
+ handler.setFormatter(colorlog.ColoredFormatter())
+
+ logging.basicConfig(handlers=[handler])
log = logging.getLogger(f'lightwood-{pid}')
log.setLevel(logging.DEBUG)
return log
| {"golden_diff": "diff --git a/lightwood/helpers/log.py b/lightwood/helpers/log.py\n--- a/lightwood/helpers/log.py\n+++ b/lightwood/helpers/log.py\n@@ -1,10 +1,15 @@\n import logging\n import os\n+import colorlog\n \n \n def initialize_log():\n pid = os.getpid()\n- logging.basicConfig()\n+\n+ handler = colorlog.StreamHandler()\n+ handler.setFormatter(colorlog.ColoredFormatter())\n+\n+ logging.basicConfig(handlers=[handler])\n log = logging.getLogger(f'lightwood-{pid}')\n log.setLevel(logging.DEBUG)\n return log\n", "issue": ":wrench: Colored terminal output\n## Task\r\n\r\nModify the lightwood [logger class](https://github.com/mindsdb/lightwood/blob/stable/lightwood/helpers/log.py) so that its output is colored, depending on the log level. Color scheme is not set in stone, but should be something that makes sense, e.g.: red for `CRITICAL`, orange for `ERROR`, yellow for `WARNING`, green for `INFO`, uncolored for `DEBUG`.\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Fork the Lightwood repository, checkout the `staging` branch and from it create a new one.\r\n- Implement the necessary changes. The package to achieve colored logging can be chosen as part of the PR, but we suggest using lightweight alternatives like [colored](https://pypi.org/project/colored/) or [termcolor](https://pypi.org/project/termcolor/).\r\n- Check the logs are using the new color scheme. For this, you can run any of the integration tests, like [`test_boston_housing`](https://github.com/mindsdb/lightwood/blob/stable/tests/integration/basic/test_boston_housing.py), and analyze the output.\r\n- Make the PR and address any comments that reviewers might make.\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "import logging\nimport os\n\n\ndef initialize_log():\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n log.setLevel(logging.DEBUG)\n return log\n\n\nlog = initialize_log()\n", "path": "lightwood/helpers/log.py"}]} | 946 | 126 |
gh_patches_debug_5795 | rasdani/github-patches | git_diff | sunpy__sunpy-7148 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fido query for GBM data only matches files with v00.pha and misses those that have been version updated (e.g. v01.pha)
### Describe the bug
The way the source client for GBM is currently implemented, scraper only looks for files ending with `v00.pha`, and hence wont return data if its instead `v01.pha`
I'll make a PR now
### To Reproduce
```python
>>> from sunpy.net import Fido, attrs as a
>>> res_gbm = Fido.search(a.Time("2014-10-14 00:00", "2014-10-15"), a.Instrument.gbm, a.Resolution.cspec, a.Detector("n5"))
Results from 1 Provider:
1 Results from the GBMClient:
Source: https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily
Start Time End Time Instrument Physobs Source Provider Resolution Detector
----------------------- ----------------------- ---------- ------- ------ -------- ---------- --------
2014-10-15 00:00:00.000 2014-10-15 23:59:59.999 GBM flux FERMI NASA cspec n5
```
however data exists for the 14th but not returned here.
</issue>
<code>
[start of sunpy/net/dataretriever/sources/fermi_gbm.py]
1 from sunpy.net.dataretriever import GenericClient
2
3 __all__ = ['GBMClient']
4
5
6 class GBMClient(GenericClient):
7 """
8 Provides access to data from the Gamma-Ray Burst Monitor (GBM) instrument
9 on board the Fermi satellite.
10
11 Although GBMs primary objective is to detect gamma-ray bursts,
12 it provides high quality high energy solar flare observations.
13
14 The instrument consists of 12 Sodium Iodide (NaI) scintillation
15 detectors, which are sensitive to an energy range of 4keV to 1MeV.
16 At any one time, 6 of the NaI detectors are Sunward facing.
17 The detectors are numbered 'n1' to 'n11'. This client supports the user
18 to choose which detector to use through the `a.Detector <sunpy.net.attrs.Detector>` attribute.
19 The default detector is 'n5'.
20
21 The GBM data comes in daily version files in two formats:
22
23 * CSPEC - counts accumulated every 4.096 seconds in 128 energy channels for each detector.
24 * CTIME - counts accumulated every 0.256 seconds in 8 energy channels
25
26 Both of which can be accessed through the attrs `a.Resolution <sunpy.net.attrs.Resolution>`.
27 The default data type is CSPEC unless the user defines.
28
29 Examples
30 --------
31 >>> from sunpy.net import Fido, attrs as a
32 >>> res = Fido.search(a.Time('2015-06-21 00:00', '2015-06-23 23:59'),
33 ... a.Instrument.gbm, a.Detector.n3,
34 ... a.Resolution.ctime) # doctest: +REMOTE_DATA
35 >>> res # doctest: +REMOTE_DATA
36 <sunpy.net.fido_factory.UnifiedResponse object at ...>
37 Results from 1 Provider:
38 <BLANKLINE>
39 3 Results from the GBMClient:
40 Source: https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily
41 <BLANKLINE>
42 Start Time End Time ... Resolution Detector
43 ----------------------- ----------------------- ... ---------- --------
44 2015-06-21 00:00:00.000 2015-06-21 23:59:59.999 ... ctime n3
45 2015-06-22 00:00:00.000 2015-06-22 23:59:59.999 ... ctime n3
46 2015-06-23 00:00:00.000 2015-06-23 23:59:59.999 ... ctime n3
47 <BLANKLINE>
48 <BLANKLINE>
49
50 """
51 baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\w){5}_(\w){2}_%y%m%d_v00.pha'
52 pattern = '{}/daily/{year:4d}/{month:2d}/{day:2d}/current/glg_{Resolution:5}_{Detector:2}_{:6d}{}'
53
54 @property
55 def info_url(self):
56 return 'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily'
57
58 @classmethod
59 def register_values(cls):
60 from sunpy.net import attrs
61 adict = {attrs.Instrument: [('GBM', 'Gamma-Ray Burst Monitor on board the Fermi satellite.')],
62 attrs.Physobs: [('flux', 'a measure of the amount of radiation received by an object from a given source.')],
63 attrs.Source: [('FERMI', 'The Fermi Gamma-ray Space Telescope.')],
64 attrs.Provider: [('NASA', 'The National Aeronautics and Space Administration.')],
65 attrs.Resolution: [
66 ("cspec", "CSPEC 128 channel spectra every 4.096 seconds."),
67 ("ctime", "CTIME provides 8 channel spectra every 0.256 seconds.")],
68 attrs.Detector: [(f"n{x}", f"GBM Detector short name for the detector NAI_{x:02}") for x in range(12)]}
69 return adict
70
[end of sunpy/net/dataretriever/sources/fermi_gbm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/net/dataretriever/sources/fermi_gbm.py b/sunpy/net/dataretriever/sources/fermi_gbm.py
--- a/sunpy/net/dataretriever/sources/fermi_gbm.py
+++ b/sunpy/net/dataretriever/sources/fermi_gbm.py
@@ -48,7 +48,8 @@
<BLANKLINE>
"""
- baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\w){5}_(\w){2}_%y%m%d_v00.pha'
+
+ baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\w){5}_(\w){2}_%y%m%d_.*\.pha'
pattern = '{}/daily/{year:4d}/{month:2d}/{day:2d}/current/glg_{Resolution:5}_{Detector:2}_{:6d}{}'
@property
| {"golden_diff": "diff --git a/sunpy/net/dataretriever/sources/fermi_gbm.py b/sunpy/net/dataretriever/sources/fermi_gbm.py\n--- a/sunpy/net/dataretriever/sources/fermi_gbm.py\n+++ b/sunpy/net/dataretriever/sources/fermi_gbm.py\n@@ -48,7 +48,8 @@\n <BLANKLINE>\n \n \"\"\"\n- baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\\w){5}_(\\w){2}_%y%m%d_v00.pha'\n+\n+ baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\\w){5}_(\\w){2}_%y%m%d_.*\\.pha'\n pattern = '{}/daily/{year:4d}/{month:2d}/{day:2d}/current/glg_{Resolution:5}_{Detector:2}_{:6d}{}'\n \n @property\n", "issue": "Fido query for GBM data only matches files with v00.pha and misses those that have been version updated (e.g. v01.pha)\n### Describe the bug\r\n\r\nThe way the source client for GBM is currently implemented, scraper only looks for files ending with `v00.pha`, and hence wont return data if its instead `v01.pha`\r\n\r\nI'll make a PR now\r\n\r\n\r\n\r\n### To Reproduce\r\n\r\n```python\r\n>>> from sunpy.net import Fido, attrs as a\r\n>>> res_gbm = Fido.search(a.Time(\"2014-10-14 00:00\", \"2014-10-15\"), a.Instrument.gbm, a.Resolution.cspec, a.Detector(\"n5\"))\r\nResults from 1 Provider:\r\n\r\n1 Results from the GBMClient:\r\nSource: https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily\r\n\r\n Start Time End Time Instrument Physobs Source Provider Resolution Detector\r\n----------------------- ----------------------- ---------- ------- ------ -------- ---------- --------\r\n2014-10-15 00:00:00.000 2014-10-15 23:59:59.999 GBM flux FERMI NASA cspec n5\r\n```\r\nhowever data exists for the 14th but not returned here.\r\n\r\n\n", "before_files": [{"content": "from sunpy.net.dataretriever import GenericClient\n\n__all__ = ['GBMClient']\n\n\nclass GBMClient(GenericClient):\n \"\"\"\n Provides access to data from the Gamma-Ray Burst Monitor (GBM) instrument\n on board the Fermi satellite.\n\n Although GBMs primary objective is to detect gamma-ray bursts,\n it provides high quality high energy solar flare observations.\n\n The instrument consists of 12 Sodium Iodide (NaI) scintillation\n detectors, which are sensitive to an energy range of 4keV to 1MeV.\n At any one time, 6 of the NaI detectors are Sunward facing.\n The detectors are numbered 'n1' to 'n11'. This client supports the user\n to choose which detector to use through the `a.Detector <sunpy.net.attrs.Detector>` attribute.\n The default detector is 'n5'.\n\n The GBM data comes in daily version files in two formats:\n\n * CSPEC - counts accumulated every 4.096 seconds in 128 energy channels for each detector.\n * CTIME - counts accumulated every 0.256 seconds in 8 energy channels\n\n Both of which can be accessed through the attrs `a.Resolution <sunpy.net.attrs.Resolution>`.\n The default data type is CSPEC unless the user defines.\n\n Examples\n --------\n >>> from sunpy.net import Fido, attrs as a\n >>> res = Fido.search(a.Time('2015-06-21 00:00', '2015-06-23 23:59'),\n ... a.Instrument.gbm, a.Detector.n3,\n ... a.Resolution.ctime) # doctest: +REMOTE_DATA\n >>> res # doctest: +REMOTE_DATA\n <sunpy.net.fido_factory.UnifiedResponse object at ...>\n Results from 1 Provider:\n <BLANKLINE>\n 3 Results from the GBMClient:\n Source: https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily\n <BLANKLINE>\n Start Time End Time ... Resolution Detector\n ----------------------- ----------------------- ... ---------- --------\n 2015-06-21 00:00:00.000 2015-06-21 23:59:59.999 ... 
ctime n3\n 2015-06-22 00:00:00.000 2015-06-22 23:59:59.999 ... ctime n3\n 2015-06-23 00:00:00.000 2015-06-23 23:59:59.999 ... ctime n3\n <BLANKLINE>\n <BLANKLINE>\n\n \"\"\"\n baseurl = r'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/%Y/%m/%d/current/glg_(\\w){5}_(\\w){2}_%y%m%d_v00.pha'\n pattern = '{}/daily/{year:4d}/{month:2d}/{day:2d}/current/glg_{Resolution:5}_{Detector:2}_{:6d}{}'\n\n @property\n def info_url(self):\n return 'https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily'\n\n @classmethod\n def register_values(cls):\n from sunpy.net import attrs\n adict = {attrs.Instrument: [('GBM', 'Gamma-Ray Burst Monitor on board the Fermi satellite.')],\n attrs.Physobs: [('flux', 'a measure of the amount of radiation received by an object from a given source.')],\n attrs.Source: [('FERMI', 'The Fermi Gamma-ray Space Telescope.')],\n attrs.Provider: [('NASA', 'The National Aeronautics and Space Administration.')],\n attrs.Resolution: [\n (\"cspec\", \"CSPEC 128 channel spectra every 4.096 seconds.\"),\n (\"ctime\", \"CTIME provides 8 channel spectra every 0.256 seconds.\")],\n attrs.Detector: [(f\"n{x}\", f\"GBM Detector short name for the detector NAI_{x:02}\") for x in range(12)]}\n return adict\n", "path": "sunpy/net/dataretriever/sources/fermi_gbm.py"}]} | 2,013 | 251 |
gh_patches_debug_7002 | rasdani/github-patches | git_diff | streamlit__streamlit-7050 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove number_input -/+ step toggles option
Is there an option to remove the -/+ number_input step toggles? If not, I would suggest that for a future release. Thank you!
Also, is it possible to increase the precision?
Right now I am just using a text_input and type casting to float to get around this.
---
Community voting on feature requests enables the Streamlit team to understand which features are most important to our users.
**If you'd like the Streamlit team to prioritize this feature request, please use the 👍 (thumbs up emoji) reaction in response to the initial post.**
</issue>
<code>
[start of e2e/scripts/st_number_input.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16 from streamlit import runtime
17
18 i1 = st.number_input("number input 1")
19 st.write('value 1: "', i1, '"')
20
21 i2 = st.number_input("number input 2", value=1)
22 st.write('value 2: "', i2, '"')
23
24 i3 = st.number_input("number input 3", 1, 10)
25 st.write('value 3: "', i3, '"')
26
27 i4 = st.number_input("number input 4", step=2)
28 st.write('value 4: "', i4, '"')
29
30 i5 = st.number_input("number input 5", max_value=10)
31 st.write('value 5: "', i5, '"')
32
33 i6 = st.number_input("number input 6", disabled=True)
34 st.write('value 6: "', i6, '"')
35
36 i7 = st.number_input("number input 7", label_visibility="hidden")
37 st.write('value 7: "', i7, '"')
38
39 i8 = st.number_input("number input 8", label_visibility="collapsed")
40 st.write('value 8: "', i8, '"')
41
42 if runtime.exists():
43
44 def on_change():
45 st.session_state.number_input_changed = True
46
47 st.number_input("number input 9", key="number_input9", on_change=on_change)
48 st.write('value 9: "', st.session_state.number_input9, '"')
49 st.write("number input changed:", "number_input_changed" in st.session_state)
50
[end of e2e/scripts/st_number_input.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_number_input.py b/e2e/scripts/st_number_input.py
--- a/e2e/scripts/st_number_input.py
+++ b/e2e/scripts/st_number_input.py
@@ -47,3 +47,9 @@
st.number_input("number input 9", key="number_input9", on_change=on_change)
st.write('value 9: "', st.session_state.number_input9, '"')
st.write("number input changed:", "number_input_changed" in st.session_state)
+
+[col1, col2, col3, col4, col5, col6] = st.columns(6)
+
+with col1:
+ i10 = st.number_input("number input 10", max_value=10)
+ st.write('value 10: "', i10, '"')
| {"golden_diff": "diff --git a/e2e/scripts/st_number_input.py b/e2e/scripts/st_number_input.py\n--- a/e2e/scripts/st_number_input.py\n+++ b/e2e/scripts/st_number_input.py\n@@ -47,3 +47,9 @@\n st.number_input(\"number input 9\", key=\"number_input9\", on_change=on_change)\n st.write('value 9: \"', st.session_state.number_input9, '\"')\n st.write(\"number input changed:\", \"number_input_changed\" in st.session_state)\n+\n+[col1, col2, col3, col4, col5, col6] = st.columns(6)\n+\n+with col1:\n+ i10 = st.number_input(\"number input 10\", max_value=10)\n+ st.write('value 10: \"', i10, '\"')\n", "issue": "Remove number_input -/+ step toggles option\nIs there an option to remove the -/+ number_input step toggles? If not, I would suggest that for a future release. Thank you! \r\n\r\nAlso, is it possible to increase the precision? \r\n\r\nRight now I am just using a text_input and type casting to float to get around this.\r\n\r\n---\r\n\r\nCommunity voting on feature requests enables the Streamlit team to understand which features are most important to our users.\r\n\r\n**If you'd like the Streamlit team to prioritize this feature request, please use the \ud83d\udc4d (thumbs up emoji) reaction in response to the initial post.**\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\ni1 = st.number_input(\"number input 1\")\nst.write('value 1: \"', i1, '\"')\n\ni2 = st.number_input(\"number input 2\", value=1)\nst.write('value 2: \"', i2, '\"')\n\ni3 = st.number_input(\"number input 3\", 1, 10)\nst.write('value 3: \"', i3, '\"')\n\ni4 = st.number_input(\"number input 4\", step=2)\nst.write('value 4: \"', i4, '\"')\n\ni5 = st.number_input(\"number input 5\", max_value=10)\nst.write('value 5: \"', i5, '\"')\n\ni6 = st.number_input(\"number input 6\", disabled=True)\nst.write('value 6: \"', i6, '\"')\n\ni7 = st.number_input(\"number input 7\", label_visibility=\"hidden\")\nst.write('value 7: \"', i7, '\"')\n\ni8 = st.number_input(\"number input 8\", label_visibility=\"collapsed\")\nst.write('value 8: \"', i8, '\"')\n\nif runtime.exists():\n\n def on_change():\n st.session_state.number_input_changed = True\n\n st.number_input(\"number input 9\", key=\"number_input9\", on_change=on_change)\n st.write('value 9: \"', st.session_state.number_input9, '\"')\n st.write(\"number input changed:\", \"number_input_changed\" in st.session_state)\n", "path": "e2e/scripts/st_number_input.py"}]} | 1,246 | 186 |
gh_patches_debug_29309 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEAT] add tests for more model types
we should test any model with predict and predict_proba functions, including the common ones:
Scikitlearn (also pipelines)
CatBoost
LGBM
XGBoost
Custom binary classification model that implements the predict_proba and predict functions
Custom multiclass classification model that implements the predict_proba and predict functions
Custom regression model that implements the predict function
</issue>
<code>
[start of deepchecks/tabular/checks/performance/confusion_matrix_report.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The confusion_matrix_report check module."""
12 import pandas as pd
13 import sklearn
14 import plotly.express as px
15
16 from deepchecks.core import CheckResult
17 from deepchecks.tabular import Context, SingleDatasetCheck
18
19
20 __all__ = ['ConfusionMatrixReport']
21
22
23 class ConfusionMatrixReport(SingleDatasetCheck):
24 """Calculate the confusion matrix of the model on the given dataset."""
25
26 def run_logic(self, context: Context, dataset_type: str = 'train') -> CheckResult:
27 """Run check.
28
29 Returns
30 -------
31 CheckResult
32 value is numpy array of the confusion matrix, displays the confusion matrix
33
34 Raises
35 ------
36 DeepchecksValueError
37 If the data is not a Dataset instance with a label
38 """
39 if dataset_type == 'train':
40 dataset = context.train
41 else:
42 dataset = context.test
43
44 context.assert_classification_task()
45 ds_y = dataset.label_col
46 ds_x = dataset.features_columns
47 model = context.model
48
49 y_pred = model.predict(ds_x)
50 total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))
51 confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
52
53 # Figure
54 fig = px.imshow(confusion_matrix, x=total_classes, y=total_classes, text_auto=True)
55 fig.update_layout(width=600, height=600)
56 fig.update_xaxes(title='Predicted Value', type='category')
57 fig.update_yaxes(title='True value', type='category')
58
59 return CheckResult(confusion_matrix, display=fig)
60
[end of deepchecks/tabular/checks/performance/confusion_matrix_report.py]
[start of deepchecks/tabular/checks/overview/model_info.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module contains model_info check."""
12 import warnings
13
14 import pandas as pd
15
16 from deepchecks.tabular import Context, ModelOnlyCheck
17 from deepchecks.core import CheckResult
18 from deepchecks.utils.model import get_model_of_pipeline
19
20
21 __all__ = ['ModelInfo']
22
23
24 class ModelInfo(ModelOnlyCheck):
25 """Summarize given model parameters."""
26
27 def run_logic(self, context: Context) -> CheckResult:
28 """Run check.
29
30 Returns
31 -------
32 CheckResult
33 value is dictionary in format {type: <model_type>, params: <model_params_dict>}
34 """
35 model = context.model
36 estimator = get_model_of_pipeline(model)
37 model_type = type(estimator).__name__
38 model_params = estimator.get_params()
39 default_params = type(estimator)().get_params()
40
41 # Create dataframe to show
42 model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
43 model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))
44
45 def highlight_not_default(data):
46 n = len(data)
47 if data['Value'] != data['Default']:
48 return n * ['background-color: lightblue']
49 else:
50 return n * ['']
51 with warnings.catch_warnings():
52 warnings.simplefilter(action='ignore', category=FutureWarning)
53 model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()
54
55 value = {'type': model_type, 'params': model_params}
56 footnote = '<p style="font-size:0.7em"><i>Colored rows are parameters with non-default values</i></p>'
57 display = [f'Model Type: {model_type}', model_param_df, footnote]
58
59 return CheckResult(value, header='Model Info', display=display)
60
[end of deepchecks/tabular/checks/overview/model_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/tabular/checks/overview/model_info.py b/deepchecks/tabular/checks/overview/model_info.py
--- a/deepchecks/tabular/checks/overview/model_info.py
+++ b/deepchecks/tabular/checks/overview/model_info.py
@@ -35,8 +35,12 @@
model = context.model
estimator = get_model_of_pipeline(model)
model_type = type(estimator).__name__
- model_params = estimator.get_params()
- default_params = type(estimator)().get_params()
+ try:
+ model_params = estimator.get_params()
+ default_params = type(estimator)().get_params()
+ except AttributeError:
+ model_params = {}
+ default_params = {}
# Create dataframe to show
model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
diff --git a/deepchecks/tabular/checks/performance/confusion_matrix_report.py b/deepchecks/tabular/checks/performance/confusion_matrix_report.py
--- a/deepchecks/tabular/checks/performance/confusion_matrix_report.py
+++ b/deepchecks/tabular/checks/performance/confusion_matrix_report.py
@@ -9,6 +9,7 @@
# ----------------------------------------------------------------------------
#
"""The confusion_matrix_report check module."""
+import numpy as np
import pandas as pd
import sklearn
import plotly.express as px
@@ -46,7 +47,7 @@
ds_x = dataset.features_columns
model = context.model
- y_pred = model.predict(ds_x)
+ y_pred = np.array(model.predict(ds_x)).reshape(len(ds_y), )
total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))
confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
| {"golden_diff": "diff --git a/deepchecks/tabular/checks/overview/model_info.py b/deepchecks/tabular/checks/overview/model_info.py\n--- a/deepchecks/tabular/checks/overview/model_info.py\n+++ b/deepchecks/tabular/checks/overview/model_info.py\n@@ -35,8 +35,12 @@\n model = context.model\n estimator = get_model_of_pipeline(model)\n model_type = type(estimator).__name__\n- model_params = estimator.get_params()\n- default_params = type(estimator)().get_params()\n+ try:\n+ model_params = estimator.get_params()\n+ default_params = type(estimator)().get_params()\n+ except AttributeError:\n+ model_params = {}\n+ default_params = {}\n \n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\ndiff --git a/deepchecks/tabular/checks/performance/confusion_matrix_report.py b/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n--- a/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n+++ b/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n@@ -9,6 +9,7 @@\n # ----------------------------------------------------------------------------\n #\n \"\"\"The confusion_matrix_report check module.\"\"\"\n+import numpy as np\n import pandas as pd\n import sklearn\n import plotly.express as px\n@@ -46,7 +47,7 @@\n ds_x = dataset.features_columns\n model = context.model\n \n- y_pred = model.predict(ds_x)\n+ y_pred = np.array(model.predict(ds_x)).reshape(len(ds_y), )\n total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n", "issue": "[FEAT] add tests for more model types\nwe should test any model with predict and predict_proba functions, including the common ones:\r\n\r\nScikitlearn (also pipelines)\r\nCatBoost\r\nLGBM\r\nXGBoost\r\nCustom binary classification model that implements the predict_proba and predict functions\r\nCustom multiclass classification model that implements the predict_proba and predict functions\r\nCustom regression model that implements the predict function\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport pandas as pd\nimport sklearn\nimport plotly.express as px\n\nfrom deepchecks.core import CheckResult\nfrom deepchecks.tabular import Context, SingleDatasetCheck\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run_logic(self, context: Context, dataset_type: str = 'train') -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises\n ------\n DeepchecksValueError\n If the data is not a Dataset instance with a label\n \"\"\"\n if dataset_type == 'train':\n dataset = context.train\n else:\n dataset = context.test\n\n context.assert_classification_task()\n ds_y = dataset.label_col\n ds_x = dataset.features_columns\n model = context.model\n\n y_pred = model.predict(ds_x)\n total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n # Figure\n fig = px.imshow(confusion_matrix, x=total_classes, y=total_classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/tabular/checks/performance/confusion_matrix_report.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module contains model_info check.\"\"\"\nimport warnings\n\nimport pandas as pd\n\nfrom deepchecks.tabular import Context, ModelOnlyCheck\nfrom deepchecks.core import CheckResult\nfrom deepchecks.utils.model import get_model_of_pipeline\n\n\n__all__ = ['ModelInfo']\n\n\nclass ModelInfo(ModelOnlyCheck):\n \"\"\"Summarize given model parameters.\"\"\"\n\n def run_logic(self, context: Context) -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is dictionary in format {type: <model_type>, params: <model_params_dict>}\n \"\"\"\n model = context.model\n estimator = get_model_of_pipeline(model)\n model_type = type(estimator).__name__\n model_params = estimator.get_params()\n default_params = type(estimator)().get_params()\n\n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\n model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))\n\n def highlight_not_default(data):\n n = len(data)\n if data['Value'] != data['Default']:\n return n * ['background-color: lightblue']\n else:\n return n * ['']\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()\n\n value = {'type': model_type, 'params': model_params}\n footnote = '<p style=\"font-size:0.7em\"><i>Colored rows are parameters with non-default values</i></p>'\n display = [f'Model Type: {model_type}', model_param_df, footnote]\n\n return CheckResult(value, header='Model Info', display=display)\n", "path": "deepchecks/tabular/checks/overview/model_info.py"}]} | 1,824 | 392 |
gh_patches_debug_5115 | rasdani/github-patches | git_diff | magenta__magenta-1347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Melody_rnn Create Dataset get_pipeline missing arg
`melody_rnn_create_dataset --config=basic_rnn --input=notesequences.tfrecord --output_dir=sequence_examples` fails with this error:
```
File "/Users/ericcacciavillani/anaconda3/envs/Magenta_Testing/lib/python3.6/site-packages/magenta/pipelines/note_sequence_pipelines.py", line 184, in transform
for amount in self._transposition_range:
TypeError: 'float' object is not iterable
```
We're trying to train our own melody rnn and we get this error, also we don't see a mention of transposition range in the melody rnn readme?
Looks like in `melody_rnn_create_dataset.py` at line 52 its calling `melody_rnn_pipeline.get_pipeline(config, FLAGS.eval_ratio)`
but `get_pipeline` in `melody_rnn_pipeline.py` takes 3 args `config`, `transposition_range`, and `eval ratio` so it looks like transposition_range is being set as eval_ratio.
Working with @robindiddams on this.
</issue>
<code>
[start of magenta/models/melody_rnn/melody_rnn_create_dataset.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Create a dataset of SequenceExamples from NoteSequence protos.
15
16 This script will extract melodies from NoteSequence protos and save them to
17 TensorFlow's SequenceExample protos for input to the melody RNN models.
18 """
19
20 import os
21
22 import tensorflow as tf
23
24 from magenta.models.melody_rnn import melody_rnn_config_flags
25 from magenta.models.melody_rnn import melody_rnn_pipeline
26 from magenta.pipelines import pipeline
27
28 flags = tf.app.flags
29 FLAGS = tf.app.flags.FLAGS
30 flags.DEFINE_string(
31 'input', None,
32 'TFRecord to read NoteSequence protos from.')
33 flags.DEFINE_string(
34 'output_dir', None,
35 'Directory to write training and eval TFRecord files. The TFRecord files '
36 'are populated with SequenceExample protos.')
37 flags.DEFINE_float(
38 'eval_ratio', 0.1,
39 'Fraction of input to set aside for eval set. Partition is randomly '
40 'selected.')
41 flags.DEFINE_string(
42 'log', 'INFO',
43 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
44 'or FATAL.')
45
46
47 def main(unused_argv):
48 tf.logging.set_verbosity(FLAGS.log)
49
50 config = melody_rnn_config_flags.config_from_flags()
51 pipeline_instance = melody_rnn_pipeline.get_pipeline(
52 config, FLAGS.eval_ratio)
53
54 FLAGS.input = os.path.expanduser(FLAGS.input)
55 FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
56 pipeline.run_pipeline_serial(
57 pipeline_instance,
58 pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
59 FLAGS.output_dir)
60
61
62 def console_entry_point():
63 tf.app.run(main)
64
65
66 if __name__ == '__main__':
67 console_entry_point()
68
[end of magenta/models/melody_rnn/melody_rnn_create_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/magenta/models/melody_rnn/melody_rnn_create_dataset.py b/magenta/models/melody_rnn/melody_rnn_create_dataset.py
--- a/magenta/models/melody_rnn/melody_rnn_create_dataset.py
+++ b/magenta/models/melody_rnn/melody_rnn_create_dataset.py
@@ -49,7 +49,7 @@
config = melody_rnn_config_flags.config_from_flags()
pipeline_instance = melody_rnn_pipeline.get_pipeline(
- config, FLAGS.eval_ratio)
+ config, eval_ratio=FLAGS.eval_ratio)
FLAGS.input = os.path.expanduser(FLAGS.input)
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
| {"golden_diff": "diff --git a/magenta/models/melody_rnn/melody_rnn_create_dataset.py b/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n--- a/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n+++ b/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n@@ -49,7 +49,7 @@\n \n config = melody_rnn_config_flags.config_from_flags()\n pipeline_instance = melody_rnn_pipeline.get_pipeline(\n- config, FLAGS.eval_ratio)\n+ config, eval_ratio=FLAGS.eval_ratio)\n \n FLAGS.input = os.path.expanduser(FLAGS.input)\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n", "issue": "[Bug] Melody_rnn Create Dataset get_pipeline missing arg\n`melody_rnn_create_dataset --config=basic_rnn --input=notesequences.tfrecord --output_dir=sequence_examples` fails with this error:\r\n```\r\nFile \"/Users/ericcacciavillani/anaconda3/envs/Magenta_Testing/lib/python3.6/site-packages/magenta/pipelines/note_sequence_pipelines.py\", line 184, in transform\r\n for amount in self._transposition_range:\r\nTypeError: 'float' object is not iterable\r\n```\r\nWe're trying to train our own melody rnn and we get this error, also we don't see a mention of transposition range in the melody rnn readme?\r\n\r\nLooks like in `melody_rnn_create_dataset.py` at line 52 its calling `melody_rnn_pipeline.get_pipeline(config, FLAGS.eval_ratio)`\r\nbut `get_pipeline` in `melody_rnn_pipeline.py` takes 3 args `config`, `transposition_range`, and `eval ratio` so it looks like transposition_range is being set as eval_ratio.\r\n\r\nWorking with @robindiddams on this.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create a dataset of SequenceExamples from NoteSequence protos.\n\nThis script will extract melodies from NoteSequence protos and save them to\nTensorFlow's SequenceExample protos for input to the melody RNN models.\n\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom magenta.models.melody_rnn import melody_rnn_config_flags\nfrom magenta.models.melody_rnn import melody_rnn_pipeline\nfrom magenta.pipelines import pipeline\n\nflags = tf.app.flags\nFLAGS = tf.app.flags.FLAGS\nflags.DEFINE_string(\n 'input', None,\n 'TFRecord to read NoteSequence protos from.')\nflags.DEFINE_string(\n 'output_dir', None,\n 'Directory to write training and eval TFRecord files. The TFRecord files '\n 'are populated with SequenceExample protos.')\nflags.DEFINE_float(\n 'eval_ratio', 0.1,\n 'Fraction of input to set aside for eval set. 
Partition is randomly '\n 'selected.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n\n config = melody_rnn_config_flags.config_from_flags()\n pipeline_instance = melody_rnn_pipeline.get_pipeline(\n config, FLAGS.eval_ratio)\n\n FLAGS.input = os.path.expanduser(FLAGS.input)\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n pipeline.run_pipeline_serial(\n pipeline_instance,\n pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),\n FLAGS.output_dir)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/melody_rnn/melody_rnn_create_dataset.py"}]} | 1,413 | 147 |
gh_patches_debug_22874 | rasdani/github-patches | git_diff | chainer__chainer-719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unreadable error message appears when `nvcc` command is not found
Only "No suche file or directory" is shown in such case.
related to #698
</issue>
<code>
[start of cupy/cuda/compiler.py]
1 import hashlib
2 import os
3 import re
4 import subprocess
5 import sys
6 import tempfile
7
8 import filelock
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 def _get_arch():
16 cc = device.Device().compute_capability
17 return 'sm_%s' % cc
18
19
20 class TemporaryDirectory(object):
21
22 def __enter__(self):
23 self.path = tempfile.mkdtemp()
24 return self.path
25
26 def __exit__(self, exc_type, exc_value, traceback):
27 if exc_value is not None:
28 return
29
30 for name in os.listdir(self.path):
31 os.unlink(os.path.join(self.path, name))
32 os.rmdir(self.path)
33
34
35 def nvcc(source, options=(), arch=None):
36 if not arch:
37 arch = _get_arch()
38 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
39
40 with TemporaryDirectory() as root_dir:
41 path = os.path.join(root_dir, 'kern')
42 cu_path = '%s.cu' % path
43 cubin_path = '%s.cubin' % path
44
45 with open(cu_path, 'w') as cu_file:
46 cu_file.write(source)
47
48 cmd.append(cu_path)
49 subprocess.check_output(cmd, cwd=root_dir)
50
51 with open(cubin_path, 'rb') as bin_file:
52 return bin_file.read()
53
54
55 def preprocess(source, options=()):
56 cmd = ['nvcc', '--preprocess'] + list(options)
57 with TemporaryDirectory() as root_dir:
58 path = os.path.join(root_dir, 'kern')
59 cu_path = '%s.cu' % path
60
61 with open(cu_path, 'w') as cu_file:
62 cu_file.write(source)
63
64 cmd.append(cu_path)
65 pp_src = subprocess.check_output(cmd, cwd=root_dir)
66
67 if isinstance(pp_src, six.binary_type):
68 pp_src = pp_src.decode('utf-8')
69 return re.sub('(?m)^#.*$', '', pp_src)
70
71
72 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
73
74
75 def get_cache_dir():
76 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
77
78
79 _empty_file_preprocess_cache = {}
80
81
82 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
83 global _empty_file_preprocess_cache
84 if cache_dir is None:
85 cache_dir = get_cache_dir()
86 if arch is None:
87 arch = _get_arch()
88
89 if 'win32' == sys.platform:
90 options += ('-Xcompiler', '/wd 4819')
91 if sys.maxsize == 9223372036854775807:
92 options += '-m64',
93 elif sys.maxsize == 2147483647:
94 options += '-m32',
95
96 env = (arch, options)
97 if '#include' in source:
98 pp_src = '%s %s' % (env, preprocess(source, options))
99 else:
100 base = _empty_file_preprocess_cache.get(env, None)
101 if base is None:
102 base = _empty_file_preprocess_cache[env] = preprocess('', options)
103 pp_src = '%s %s %s' % (env, base, source)
104
105 if isinstance(pp_src, six.text_type):
106 pp_src = pp_src.encode('utf-8')
107 name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
108
109 mod = function.Module()
110
111 if not os.path.exists(cache_dir):
112 os.makedirs(cache_dir)
113
114 lock_path = os.path.join(cache_dir, 'lock_file.lock')
115
116 path = os.path.join(cache_dir, name)
117 with filelock.FileLock(lock_path) as lock:
118 if os.path.exists(path):
119 with open(path, 'rb') as file:
120 cubin = file.read()
121 mod.load(cubin)
122 else:
123 lock.release()
124 cubin = nvcc(source, options, arch)
125 mod.load(cubin)
126 lock.acquire()
127 with open(path, 'wb') as cubin_file:
128 cubin_file.write(cubin)
129
130 return mod
131
[end of cupy/cuda/compiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -32,6 +32,15 @@
os.rmdir(self.path)
+def _run_nvcc(cmd, cwd):
+ try:
+ return subprocess.check_output(cmd, cwd=cwd)
+ except OSError as e:
+ trace = sys.exc_info()[2]
+ msg = 'Failed to run `nvcc` command: ' + str(e)
+ six.reraise(OSError, msg, trace)
+
+
def nvcc(source, options=(), arch=None):
if not arch:
arch = _get_arch()
@@ -46,7 +55,7 @@
cu_file.write(source)
cmd.append(cu_path)
- subprocess.check_output(cmd, cwd=root_dir)
+ _run_nvcc(cmd, root_dir)
with open(cubin_path, 'rb') as bin_file:
return bin_file.read()
@@ -62,7 +71,7 @@
cu_file.write(source)
cmd.append(cu_path)
- pp_src = subprocess.check_output(cmd, cwd=root_dir)
+ pp_src = _run_nvcc(cmd, root_dir)
if isinstance(pp_src, six.binary_type):
pp_src = pp_src.decode('utf-8')
| {"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -32,6 +32,15 @@\n os.rmdir(self.path)\n \n \n+def _run_nvcc(cmd, cwd):\n+ try:\n+ return subprocess.check_output(cmd, cwd=cwd)\n+ except OSError as e:\n+ trace = sys.exc_info()[2]\n+ msg = 'Failed to run `nvcc` command: ' + str(e)\n+ six.reraise(OSError, msg, trace)\n+\n+\n def nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n@@ -46,7 +55,7 @@\n cu_file.write(source)\n \n cmd.append(cu_path)\n- subprocess.check_output(cmd, cwd=root_dir)\n+ _run_nvcc(cmd, root_dir)\n \n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n@@ -62,7 +71,7 @@\n cu_file.write(source)\n \n cmd.append(cu_path)\n- pp_src = subprocess.check_output(cmd, cwd=root_dir)\n+ pp_src = _run_nvcc(cmd, root_dir)\n \n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n", "issue": "Unreadable error message appears when `nvcc` command is not found\nOnly \"No suche file or directory\" is shown in such case.\n\nrelated to #698\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n subprocess.check_output(cmd, cwd=root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = subprocess.check_output(cmd, cwd=root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, 
source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]} | 1,788 | 305 |
gh_patches_debug_27343 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5115 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update-locale triggers CKV_DOCKER_5
**Describe the issue**
CKV_DOCKER_5 (Ensure update instructions are not use alone in the Dockerfile) fails on anything that has `update` in it. From reading the source, it seems that CKV_DOCKER_5 is geared towards `apt-get update` and `apt-get install`, which, in the code, cancel each other out so that the `update_cnt` variable remains 0. I have other `update` commands, like `update-locale`. I'm not sure whether this is something in my Dockerfile that I need to deal with, or whether I can just ignore the failure message.
**Examples**
```
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
dpkg-reconfigure --frontend noninteractive locales && \
update-locale LANG=en_US.UTF-8
```
**Version (please complete the following information):**
- Checkov Version 2.2.229
</issue>
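A quick sketch of why a plain substring test over-matches here, and how a stricter regular expression (the same idea the eventual fix uses) distinguishes `apt-get update` from `update-locale`; the sample lines are made up for illustration:

```
import re

# Match "update" only as a standalone word preceded by whitespace,
# optionally written as "--update", so "update-locale" is not flagged.
UPDATE_PATTERN = re.compile(r"\s+(?:--)?update(?!\S)")

samples = [
    "RUN apt-get update",                                  # should be flagged
    "RUN apk --update add curl",                           # should be flagged
    "RUN update-locale LANG=en_US.UTF-8",                  # should NOT be flagged
    "RUN dpkg-reconfigure locales && update-locale LANG=en_US.UTF-8",
]

for line in samples:
    substring_hit = "update" in line               # the over-eager check
    regex_hit = bool(UPDATE_PATTERN.search(line))  # the stricter check
    print(substring_hit, regex_hit, line)
```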
<code>
[start of checkov/dockerfile/checks/UpdateNotAlone.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
7
8 if TYPE_CHECKING:
9 from dockerfile_parse.parser import _Instruction
10
11 install_commands = (
12 "install",
13 "source-install",
14 "reinstall",
15 "groupinstall",
16 "localinstall",
17 "add",
18 )
19 update_commands = (
20 "update",
21 "--update",
22 )
23
24
25 class UpdateNotAlone(BaseDockerfileCheck):
26 def __init__(self) -> None:
27 name = "Ensure update instructions are not use alone in the Dockerfile"
28 id = "CKV_DOCKER_5"
29 supported_instructions = ("RUN",)
30 categories = (CheckCategories.APPLICATION_SECURITY,)
31 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
32
33 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
34 update_instructions = []
35 update_cnt = 0
36 i = 0
37 for instruction in conf:
38 content = instruction["content"]
39 if instruction["instruction"] in self.supported_instructions:
40
41 if any(x in content for x in update_commands):
42 update_cnt = update_cnt + 1
43 update_instructions.append(i)
44 if any(x in content for x in install_commands):
45 update_cnt = update_cnt - 1
46 i = i + 1
47
48 if update_cnt <= 0:
49 return CheckResult.PASSED, None
50 output = []
51 for i in update_instructions:
52 output.append(conf[i])
53
54 return CheckResult.FAILED, output
55
56
57 check = UpdateNotAlone()
58
[end of checkov/dockerfile/checks/UpdateNotAlone.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/dockerfile/checks/UpdateNotAlone.py b/checkov/dockerfile/checks/UpdateNotAlone.py
--- a/checkov/dockerfile/checks/UpdateNotAlone.py
+++ b/checkov/dockerfile/checks/UpdateNotAlone.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import re
from typing import TYPE_CHECKING
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -8,6 +9,8 @@
if TYPE_CHECKING:
from dockerfile_parse.parser import _Instruction
+UPDATE_COMMANDS_PATTERN = re.compile(r"\s+(?:--)?update(?!\S)")
+
install_commands = (
"install",
"source-install",
@@ -15,10 +18,7 @@
"groupinstall",
"localinstall",
"add",
-)
-update_commands = (
- "update",
- "--update",
+ "upgrade"
)
@@ -38,7 +38,7 @@
content = instruction["content"]
if instruction["instruction"] in self.supported_instructions:
- if any(x in content for x in update_commands):
+ if re.search(UPDATE_COMMANDS_PATTERN, content):
update_cnt = update_cnt + 1
update_instructions.append(i)
if any(x in content for x in install_commands):
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/UpdateNotAlone.py b/checkov/dockerfile/checks/UpdateNotAlone.py\n--- a/checkov/dockerfile/checks/UpdateNotAlone.py\n+++ b/checkov/dockerfile/checks/UpdateNotAlone.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import re\n from typing import TYPE_CHECKING\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n@@ -8,6 +9,8 @@\n if TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n \n+UPDATE_COMMANDS_PATTERN = re.compile(r\"\\s+(?:--)?update(?!\\S)\")\n+\n install_commands = (\n \"install\",\n \"source-install\",\n@@ -15,10 +18,7 @@\n \"groupinstall\",\n \"localinstall\",\n \"add\",\n-)\n-update_commands = (\n- \"update\",\n- \"--update\",\n+ \"upgrade\"\n )\n \n \n@@ -38,7 +38,7 @@\n content = instruction[\"content\"]\n if instruction[\"instruction\"] in self.supported_instructions:\n \n- if any(x in content for x in update_commands):\n+ if re.search(UPDATE_COMMANDS_PATTERN, content):\n update_cnt = update_cnt + 1\n update_instructions.append(i)\n if any(x in content for x in install_commands):\n", "issue": "update-locale triggers CKV_DOCKER_5\n**Describe the issue**\r\nCKV_DOCKER_5 (Ensure update instructions are not use alone in the Dockerfile) fails in anything that has `update` in it. From reading the source, it. seems that CKV_DOCKER_5 is geared towards `apt-get update` and `apt-get install` which, from the code, are cancelling each other out so the `update_cnt` variable remains 0. I have other `update` command like `update-locale`. I'm not sure if it's part of the issue in my Dockerfile that I need to deal or I could just ignore the failure message.\r\n\r\n**Examples**\r\n```\r\nRUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \\\r\n dpkg-reconfigure --frontend noninteractive locales && \\\r\n update-locale LANG=en_US.UTF-8\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.229\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\ninstall_commands = (\n \"install\",\n \"source-install\",\n \"reinstall\",\n \"groupinstall\",\n \"localinstall\",\n \"add\",\n)\nupdate_commands = (\n \"update\",\n \"--update\",\n)\n\n\nclass UpdateNotAlone(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure update instructions are not use alone in the Dockerfile\"\n id = \"CKV_DOCKER_5\"\n supported_instructions = (\"RUN\",)\n categories = (CheckCategories.APPLICATION_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n update_instructions = []\n update_cnt = 0\n i = 0\n for instruction in conf:\n content = instruction[\"content\"]\n if instruction[\"instruction\"] in self.supported_instructions:\n\n if any(x in content for x in update_commands):\n update_cnt = update_cnt + 1\n update_instructions.append(i)\n if any(x in content for x in install_commands):\n update_cnt = update_cnt - 1\n i = i + 1\n\n if update_cnt <= 0:\n return CheckResult.PASSED, None\n output = []\n for i in update_instructions:\n output.append(conf[i])\n\n return CheckResult.FAILED, 
output\n\n\ncheck = UpdateNotAlone()\n", "path": "checkov/dockerfile/checks/UpdateNotAlone.py"}]} | 1,268 | 297 |
gh_patches_debug_57147 | rasdani/github-patches | git_diff | pymeasure__pymeasure-433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pyvisa no longer supports ask; replace with query
In resources.py
`idn = res.ask('*idn?')[:-1]`
Should be:
`idn = res.query('*idn?')[:-1]`
</issue>
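A small, hardware-free sketch of the rename: modern PyVISA exposes `query()` where older releases had `ask()`. The fake resource class below is purely illustrative so the snippet runs without an instrument or a VISA backend:

```
class FakeResource:
    # Stand-in for a pyvisa resource; real code would come from
    # pyvisa.ResourceManager().open_resource(...).
    def query(self, command):
        return "Fake Instruments Inc.,Model 0,0,1.0\n"

def identify(resource, command="*idn?"):
    # Prefer the modern query(); fall back to ask() on very old PyVISA.
    send = getattr(resource, "query", None) or getattr(resource, "ask")
    return send(command)[:-1]

print(identify(FakeResource()))
```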
<code>
[start of pymeasure/instruments/resources.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import pyvisa
26
27
28 def list_resources():
29 """
30 Prints the available resources, and returns a list of VISA resource names
31
32 .. code-block:: python
33
34 resources = list_resources()
35 #prints (e.g.)
36 #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******
37 #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****
38 dmm = Agilent34410(resources[0])
39
40 """
41 rm = pyvisa.ResourceManager()
42 instrs = rm.list_resources()
43 for n, instr in enumerate(instrs):
44 # trying to catch errors in comunication
45 try:
46 res = rm.open_resource(instr)
47 # try to avoid errors from *idn?
48 try:
49 # noinspection PyUnresolvedReferences
50 idn = res.ask('*idn?')[:-1]
51 except pyvisa.Error:
52 idn = "Not known"
53 finally:
54 res.close()
55 print(n, ":", instr, ":", idn)
56 except pyvisa.VisaIOError as e:
57 print(n, ":", instr, ":", "Visa IO Error: check connections")
58 print(e)
59 rm.close()
60 return instrs
61
[end of pymeasure/instruments/resources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py
--- a/pymeasure/instruments/resources.py
+++ b/pymeasure/instruments/resources.py
@@ -47,7 +47,7 @@
# try to avoid errors from *idn?
try:
# noinspection PyUnresolvedReferences
- idn = res.ask('*idn?')[:-1]
+ idn = res.query('*idn?')[:-1]
except pyvisa.Error:
idn = "Not known"
finally:
| {"golden_diff": "diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py\n--- a/pymeasure/instruments/resources.py\n+++ b/pymeasure/instruments/resources.py\n@@ -47,7 +47,7 @@\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n- idn = res.ask('*idn?')[:-1]\n+ idn = res.query('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n", "issue": "Pyvisa no longer support ask, replace with query\nIn resources.py\r\n`idn = res.ask('*idn?')[:-1]`\r\nShould be:\r\n`idn = res.query('*idn?')[:-1]`\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n \n .. code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.ask('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}]} | 1,239 | 123 |
gh_patches_debug_56463 | rasdani/github-patches | git_diff | acl-org__acl-anthology-3109 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reingestion Request: ROCLING (10-20-2023)
### General information about this request
- [X] I confirm that I have read the [Information for Submitters](https://aclanthology.org/info/contrib/).
- [ ] I am submitting a request for a **new venue** that does not exist in the ACL Anthology yet.
### Venue Identifier
ROCLING
### Volume Title
Proceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)
### Venue Name (only if you are submitting a new venue)
Conference on Computational Linguistics and Speech Processing
### Venue Website (only if you are submitting a new venue)
https://rocling2023.github.io/
### Date of Publication
2023-10-20
### Supporting Information
Dear Anthology Director,
I'm Hou-Chiang Tseng, the publication chair of the 35th annual Conference on Computational Linguistics and Speech Processing (ROCLING 2023).
The conference website: https://rocling2023.github.io/
We want to register ROCLING 2023 with the ACL Anthology. Please see the following two items:
(a) the complete list of volumes: please see the attached file,
and (b) all the new material can be downloaded from the following URL:
https://drive.google.com/drive/folders/1dxt_gYlUvmuLiNETgDRg9cGpiJxVGwbD?usp=sharing
If there is any question, please let me know.
[Anthology.Volume_ROCLING.2023.xlsx](https://github.com/acl-org/acl-anthology/files/14318157/Anthology.Volume_ROCLING.2023.xlsx)
Best regards,
Dr. Hou-Chiang Tseng
</issue>
<code>
[start of bin/volumes_from_diff.py]
1 #!/usr/bin/env python3
2
3 """
4 Takes a list of XML files on STDIN, and prints all the volumes
5 within each of those files. e.g.,
6
7 git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH
8
9 Used to find the list of volumes to generate previews for.
10 """
11
12 import sys
13 import argparse
14 import lxml.etree as etree
15
16
17 parser = argparse.ArgumentParser()
18 parser.add_argument("url_root")
19 args = parser.parse_args()
20
21 volumes = []
22 for filepath in sys.stdin:
23 if filepath.startswith("python/") or not filepath.endswith(".xml"):
24 continue
25
26 try:
27 tree = etree.parse(filepath.rstrip())
28 except Exception:
29 continue
30
31 root = tree.getroot()
32 collection_id = root.attrib["id"]
33 for volume in root.findall("./volume"):
34 volume_name = volume.attrib["id"]
35 volume_id = f"{collection_id}-{volume_name}"
36 volumes.append(f"[{volume_id}]({args.url_root}/{volume_id})")
37
38 if len(volumes) > 50:
39 volumes = volumes[0:50] + [f"(plus {len(volumes)-50} more...)"]
40
41 print(", ".join(volumes))
42
[end of bin/volumes_from_diff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/volumes_from_diff.py b/bin/volumes_from_diff.py
--- a/bin/volumes_from_diff.py
+++ b/bin/volumes_from_diff.py
@@ -20,6 +20,7 @@
volumes = []
for filepath in sys.stdin:
+ filepath = filepath.rstrip()
if filepath.startswith("python/") or not filepath.endswith(".xml"):
continue
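The one-line `rstrip()` added above matters because lines read from `sys.stdin` keep their trailing newline, so the `.endswith(".xml")` filter silently rejects every file. A minimal demonstration of the string behaviour (the path is made up):

```
# Lines from sys.stdin arrive with their trailing newline attached.
line = "data/xml/2023.rocling.xml\n"

print(line.endswith(".xml"))            # False - the newline defeats the filter
print(line.rstrip().endswith(".xml"))   # True  - after stripping, as in the fix
```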
| {"golden_diff": "diff --git a/bin/volumes_from_diff.py b/bin/volumes_from_diff.py\n--- a/bin/volumes_from_diff.py\n+++ b/bin/volumes_from_diff.py\n@@ -20,6 +20,7 @@\n \n volumes = []\n for filepath in sys.stdin:\n+ filepath = filepath.rstrip()\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n", "issue": "Reingestion Request: ROCLING (10-20-2023)\n### General information about this request\n\n- [X] I confirm that I have read the [Information for Submitters](https://aclanthology.org/info/contrib/).\n- [ ] I am submitting a request for a **new venue** that does not exist in the ACL Anthology yet.\n\n### Venue Identifier\n\nROCLING\n\n### Volume Title\n\nProceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)\n\n### Venue Name (only if you are submitting a new venue)\n\nConference on Computational Linguistics and Speech Processing\n\n### Venue Website (only if you are submitting a new venue)\n\nhttps://rocling2023.github.io/\n\n### Date of Publication\n\n2023-10-20\n\n### Supporting Information\n\nDear Anthology Director,\r\n\r\nI'm Hou-Chiang Tseng who the publication chair of the 35th annual Conference on Computational Linguistics and Speech Processing (ROCLING 2023).\r\n\r\nThe conference website: https://rocling2023.github.io/\r\n\r\nWe want to register the ROCLING 2023 to ACL Anthology. Please see following two items:\r\n(a) the complete list of volumes: please see the attached file,\r\nand (b) all the new material can be downloaded from the following URL:\r\nhttps://drive.google.com/drive/folders/1dxt_gYlUvmuLiNETgDRg9cGpiJxVGwbD?usp=sharing\r\n\r\nIf there is any question, please let me know.\r\n[Anthology.Volume_ROCLING.2023.xlsx](https://github.com/acl-org/acl-anthology/files/14318157/Anthology.Volume_ROCLING.2023.xlsx)\r\n\r\nBest regards,\r\nDr. Hou-Chiang Tseng\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nTakes a list of XML files on STDIN, and prints all the volumes\nwithin each of those files. e.g.,\n\n git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH\n\nUsed to find the list of volumes to generate previews for.\n\"\"\"\n\nimport sys\nimport argparse\nimport lxml.etree as etree\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url_root\")\nargs = parser.parse_args()\n\nvolumes = []\nfor filepath in sys.stdin:\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n\n try:\n tree = etree.parse(filepath.rstrip())\n except Exception:\n continue\n\n root = tree.getroot()\n collection_id = root.attrib[\"id\"]\n for volume in root.findall(\"./volume\"):\n volume_name = volume.attrib[\"id\"]\n volume_id = f\"{collection_id}-{volume_name}\"\n volumes.append(f\"[{volume_id}]({args.url_root}/{volume_id})\")\n\nif len(volumes) > 50:\n volumes = volumes[0:50] + [f\"(plus {len(volumes)-50} more...)\"]\n\nprint(\", \".join(volumes))\n", "path": "bin/volumes_from_diff.py"}]} | 1,285 | 83 |
gh_patches_debug_50396 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow simple loading of "dummy" configuration
A simple method to load a minimal configuration should be added.
This can be handy for some data-analysis tasks in notebooks, where no full configuration is needed.
Something like `Configuration.get_empty_config()`, which would contain the minimum required keys set to default values.
</issue>
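A rough sketch of the kind of helper the request describes. The key names below are illustrative defaults, not freqtrade's actual minimal schema, and `get_empty_config()` is the hypothetical entry point proposed in the issue:

```
def get_empty_config():
    # Hypothetical minimal configuration for notebook/data-analysis use;
    # the exchange name is deliberately left empty.
    return {
        "max_open_trades": 1,
        "stake_currency": "BTC",
        "stake_amount": 0.001,
        "dry_run": True,
        "exchange": {"name": "", "key": "", "secret": "", "pair_whitelist": []},
    }

config = get_empty_config()
# Downstream checks (such as the exchange check) must tolerate the empty
# exchange name for such configs, which is what the runmode skip enables.
print(config["exchange"]["name"] == "")
```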
<code>
[start of freqtrade/configuration/check_exchange.py]
1 import logging
2 from typing import Any, Dict
3
4 from freqtrade import OperationalException
5 from freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,
6 is_exchange_known_ccxt, is_exchange_bad,
7 is_exchange_officially_supported)
8 from freqtrade.state import RunMode
9
10 logger = logging.getLogger(__name__)
11
12
13 def check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:
14 """
15 Check if the exchange name in the config file is supported by Freqtrade
16 :param check_for_bad: if True, check the exchange against the list of known 'bad'
17 exchanges
18 :return: False if exchange is 'bad', i.e. is known to work with the bot with
19 critical issues or does not work at all, crashes, etc. True otherwise.
20 raises an exception if the exchange if not supported by ccxt
21 and thus is not known for the Freqtrade at all.
22 """
23
24 if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]
25 and not config.get('exchange', {}).get('name')):
26 # Skip checking exchange in plot mode, since it requires no exchange
27 return True
28 logger.info("Checking exchange...")
29
30 exchange = config.get('exchange', {}).get('name').lower()
31 if not exchange:
32 raise OperationalException(
33 f'This command requires a configured exchange. You should either use '
34 f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\n'
35 f'The following exchanges are available for Freqtrade: '
36 f'{", ".join(available_exchanges())}'
37 )
38
39 if not is_exchange_known_ccxt(exchange):
40 raise OperationalException(
41 f'Exchange "{exchange}" is not known to the ccxt library '
42 f'and therefore not available for the bot.\n'
43 f'The following exchanges are available for Freqtrade: '
44 f'{", ".join(available_exchanges())}'
45 )
46
47 if check_for_bad and is_exchange_bad(exchange):
48 raise OperationalException(f'Exchange "{exchange}" is known to not work with the bot yet. '
49 f'Reason: {get_exchange_bad_reason(exchange)}')
50
51 if is_exchange_officially_supported(exchange):
52 logger.info(f'Exchange "{exchange}" is officially supported '
53 f'by the Freqtrade development team.')
54 else:
55 logger.warning(f'Exchange "{exchange}" is known to the the ccxt library, '
56 f'available for the bot, but not officially supported '
57 f'by the Freqtrade development team. '
58 f'It may work flawlessly (please report back) or have serious issues. '
59 f'Use it at your own discretion.')
60
61 return True
62
[end of freqtrade/configuration/check_exchange.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py
--- a/freqtrade/configuration/check_exchange.py
+++ b/freqtrade/configuration/check_exchange.py
@@ -21,7 +21,7 @@
and thus is not known for the Freqtrade at all.
"""
- if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]
+ if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]
and not config.get('exchange', {}).get('name')):
# Skip checking exchange in plot mode, since it requires no exchange
return True
| {"golden_diff": "diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py\n--- a/freqtrade/configuration/check_exchange.py\n+++ b/freqtrade/configuration/check_exchange.py\n@@ -21,7 +21,7 @@\n and thus is not known for the Freqtrade at all.\n \"\"\"\n \n- if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]\n+ if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]\n and not config.get('exchange', {}).get('name')):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n", "issue": "Allow simple loading of \"dummy\" configuration\nA simple method to load a minimal configuration should be added.\r\n\r\nThis can be handy for some data-analysis tasks in notebooks, where no full configuration is needed or necessary.\r\n\r\nSomething like `Configuration.get_empty_config()`, which contains the minimum required keys set to default values\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import OperationalException\nfrom freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,\n is_exchange_known_ccxt, is_exchange_bad,\n is_exchange_officially_supported)\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:\n \"\"\"\n Check if the exchange name in the config file is supported by Freqtrade\n :param check_for_bad: if True, check the exchange against the list of known 'bad'\n exchanges\n :return: False if exchange is 'bad', i.e. is known to work with the bot with\n critical issues or does not work at all, crashes, etc. True otherwise.\n raises an exception if the exchange if not supported by ccxt\n and thus is not known for the Freqtrade at all.\n \"\"\"\n\n if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]\n and not config.get('exchange', {}).get('name')):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n logger.info(\"Checking exchange...\")\n\n exchange = config.get('exchange', {}).get('name').lower()\n if not exchange:\n raise OperationalException(\n f'This command requires a configured exchange. You should either use '\n f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if not is_exchange_known_ccxt(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not known to the ccxt library '\n f'and therefore not available for the bot.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if check_for_bad and is_exchange_bad(exchange):\n raise OperationalException(f'Exchange \"{exchange}\" is known to not work with the bot yet. '\n f'Reason: {get_exchange_bad_reason(exchange)}')\n\n if is_exchange_officially_supported(exchange):\n logger.info(f'Exchange \"{exchange}\" is officially supported '\n f'by the Freqtrade development team.')\n else:\n logger.warning(f'Exchange \"{exchange}\" is known to the the ccxt library, '\n f'available for the bot, but not officially supported '\n f'by the Freqtrade development team. '\n f'It may work flawlessly (please report back) or have serious issues. '\n f'Use it at your own discretion.')\n\n return True\n", "path": "freqtrade/configuration/check_exchange.py"}]} | 1,312 | 155 |
gh_patches_debug_34359 | rasdani/github-patches | git_diff | bridgecrewio__checkov-975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_103 should not trigger for TCP NLB's
Hi!
CKV_AWS_103 throws an error when using an NLB with TCP protocol.
However, setting an `ssl_policy` only makes sense for ELBs whose protocol is HTTPS or TLS.
```
14 | resource "aws_lb_listener" "nlb_listener_https" {
15 | load_balancer_arn = aws_lb.nlb.arn
16 | port = xxxxxxxx
17 | protocol = "TCP"
18 | default_action {
19 | target_group_arn = aws_lb_target_group.nlb_target_group.id
20 | type = "forward"
21 | }
22 | }
```
`Check: CKV_AWS_103: "Ensure that load balancer is using TLS 1.2"
FAILED for resource: aws_lb_listener.nlb_listener_https
File: /nlb.tf:14-22
Guide: https://docs.bridgecrew.io/docs/bc_aws_general_43`
**Expected behavior**
Check is PASSED instead of FAILED.
</issue>
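A simplified, standalone sketch of the protocol branching the check needs (the real checkov class also inspects redirect default actions); the `conf` dicts below follow the list-wrapped value shape used in the check:

```
def evaluate(conf):
    protocol = conf.get("protocol")
    if protocol in (["HTTPS"], ["TLS"]):
        name = str(conf.get("ssl_policy", "")).strip("['']")
        ok = (name.startswith("ELBSecurityPolicy-FS-1-2")
              or name.startswith("ELBSecurityPolicy-TLS-1-2"))
        return "PASSED" if ok else "FAILED"
    if protocol in (["TCP"], ["UDP"], ["TCP_UDP"]):
        # No TLS negotiation happens on plain TCP/UDP listeners, so an
        # ssl_policy requirement simply does not apply.
        return "PASSED"
    return "FAILED"

print(evaluate({"protocol": ["TCP"]}))                                  # PASSED
print(evaluate({"protocol": ["HTTPS"],
                "ssl_policy": ["ELBSecurityPolicy-TLS-1-2-2017-01"]}))  # PASSED
print(evaluate({"protocol": ["HTTPS"],
                "ssl_policy": ["ELBSecurityPolicy-2016-08"]}))          # FAILED
```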
<code>
[start of checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class AppLoadBalancerTLS12(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure that load balancer is using TLS 1.2"
8 id = "CKV_AWS_103"
9 supported_resources = ['aws_lb_listener']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 key="protocol"
15 if key in conf.keys():
16 if conf[key] in (["HTTPS"], ["TLS"]):
17 # Only interested in HTTPS & TLS listeners
18 policy="ssl_policy"
19 if policy in conf.keys():
20 name=str(conf[policy]).strip("['']")
21 if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
22 return CheckResult.PASSED
23 else:
24 return CheckResult.FAILED
25 else:
26 return CheckResult.FAILED
27 else:
28 for action in conf.get('default_action',[]):
29 for redirect in action.get('redirect',[]):
30 if redirect.get('protocol',[]) == ["HTTPS"]:
31 return CheckResult.PASSED
32 return CheckResult.FAILED
33 else:
34 return CheckResult.FAILED
35
36
37 check = AppLoadBalancerTLS12()
38
[end of checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
--- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
+++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
@@ -6,28 +6,35 @@
def __init__(self):
name = "Ensure that load balancer is using TLS 1.2"
id = "CKV_AWS_103"
- supported_resources = ['aws_lb_listener']
+ supported_resources = ["aws_lb_listener"]
categories = [CheckCategories.GENERAL_SECURITY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+ super().__init__(
+ name=name,
+ id=id,
+ categories=categories,
+ supported_resources=supported_resources,
+ )
def scan_resource_conf(self, conf):
- key="protocol"
+ key = "protocol"
if key in conf.keys():
if conf[key] in (["HTTPS"], ["TLS"]):
- # Only interested in HTTPS & TLS listeners
- policy="ssl_policy"
+ # Only interested in HTTPS & TLS listeners
+ policy = "ssl_policy"
if policy in conf.keys():
- name=str(conf[policy]).strip("['']")
- if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
- return CheckResult.PASSED
- else:
- return CheckResult.FAILED
+ name = str(conf[policy]).strip("['']")
+ if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
+ return CheckResult.PASSED
+ else:
+ return CheckResult.FAILED
else:
- return CheckResult.FAILED
+ return CheckResult.FAILED
+ elif conf[key] in (["TCP"], ["UDP"], ["TCP_UDP"]):
+ return CheckResult.PASSED
else:
- for action in conf.get('default_action',[]):
- for redirect in action.get('redirect',[]):
- if redirect.get('protocol',[]) == ["HTTPS"]:
+ for action in conf.get("default_action", []):
+ for redirect in action.get("redirect", []):
+ if redirect.get("protocol", []) == ["HTTPS"]:
return CheckResult.PASSED
return CheckResult.FAILED
else:
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n--- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n+++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n@@ -6,28 +6,35 @@\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n- supported_resources = ['aws_lb_listener']\n+ supported_resources = [\"aws_lb_listener\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n+ super().__init__(\n+ name=name,\n+ id=id,\n+ categories=categories,\n+ supported_resources=supported_resources,\n+ )\n \n def scan_resource_conf(self, conf):\n- key=\"protocol\"\n+ key = \"protocol\"\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], [\"TLS\"]):\n- # Only interested in HTTPS & TLS listeners\n- policy=\"ssl_policy\"\n+ # Only interested in HTTPS & TLS listeners\n+ policy = \"ssl_policy\"\n if policy in conf.keys():\n- name=str(conf[policy]).strip(\"['']\") \n- if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n- return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n+ name = str(conf[policy]).strip(\"['']\")\n+ if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n+ return CheckResult.PASSED\n+ else:\n+ return CheckResult.FAILED\n else:\n- return CheckResult.FAILED\n+ return CheckResult.FAILED\n+ elif conf[key] in ([\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n+ return CheckResult.PASSED\n else:\n- for action in conf.get('default_action',[]):\n- for redirect in action.get('redirect',[]):\n- if redirect.get('protocol',[]) == [\"HTTPS\"]:\n+ for action in conf.get(\"default_action\", []):\n+ for redirect in action.get(\"redirect\", []):\n+ if redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n", "issue": "CKV_AWS_103 should not trigger for TCP NLB's\nHi!\r\n\r\nCKV_AWS_103 throws an error when using an NLB with TCP protocol.\r\nHowever, setting an `ssl_policy` only make sense for ELB's with protocol HTTPS or TLS.\r\n\r\n```\r\n 14 | resource \"aws_lb_listener\" \"nlb_listener_https\" {\r\n 15 | load_balancer_arn = aws_lb.nlb.arn\r\n 16 | port = xxxxxxxx\r\n 17 | protocol = \"TCP\"\r\n 18 | default_action {\r\n 19 | target_group_arn = aws_lb_target_group.nlb_target_group.id\r\n 20 | type = \"forward\"\r\n 21 | }\r\n 22 | }\r\n```\r\n\r\n`Check: CKV_AWS_103: \"Ensure that load balancer is using TLS 1.2\"\r\n FAILED for resource: aws_lb_listener.nlb_listener_https\r\n File: /nlb.tf:14-22\r\n Guide: https://docs.bridgecrew.io/docs/bc_aws_general_43`\r\n\r\n**Expected behavior**\r\nCheck is PASSED instead of FAILED.\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AppLoadBalancerTLS12(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n supported_resources = ['aws_lb_listener']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n key=\"protocol\"\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], 
[\"TLS\"]):\n # Only interested in HTTPS & TLS listeners\n policy=\"ssl_policy\"\n if policy in conf.keys():\n name=str(conf[policy]).strip(\"['']\") \n if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n else:\n for action in conf.get('default_action',[]):\n for redirect in action.get('redirect',[]):\n if redirect.get('protocol',[]) == [\"HTTPS\"]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n\n\ncheck = AppLoadBalancerTLS12()\n", "path": "checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py"}]} | 1,219 | 578 |
gh_patches_debug_29263 | rasdani/github-patches | git_diff | joke2k__faker-266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use environment markers and reactivate universal wheels + distribute wheels
Right now `pip install` is broken because the released package claims to be a universal wheel but does not use environment markers,
so pip builds a single wheel for all Pythons on whichever Python it is first run on, and that wheel is then reused on all the others;
a pip install on Python 2.6 therefore creates a broken wheel that carries the extra dependency, which is subsequently used in other Pythons and will utterly break them.
</issue>
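For context, a hedged sketch of how environment markers defer the decision to install time, so a single universal wheel stays correct on every interpreter. It uses the third-party `packaging` library purely to evaluate a marker; the declarative forms in the comments are the usual ways to express the same condition to setuptools:

```
from packaging.markers import Marker

# Declarative equivalents for setup()/setup.cfg:
#   install_requires=['importlib; python_version < "2.7"']
# or, via extras_require keys:
#   {':python_version=="2.6"': ['importlib']}
marker = Marker('python_version < "2.7" or python_version == "3.0"')

print(marker.evaluate({"python_version": "2.6"}))  # True  -> importlib installed
print(marker.evaluate({"python_version": "3.9"}))  # False -> dependency skipped
```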
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import os
5 import io
6 import sys
7 from setuptools import setup, find_packages
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read()
11 NEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding="utf8").read()
12
13
14 version = '0.5.3'
15
16 install_requires = []
17 if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or
18 (sys.version_info[0] == 3 and sys.version_info[1] < 1)):
19 install_requires.append('importlib')
20
21 # this module can be zip-safe if the zipimporter implements iter_modules or if
22 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
23 try:
24 import pkgutil
25 import zipimport
26 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
27 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
28 except (ImportError, AttributeError):
29 zip_safe = False
30
31 setup(
32 name='fake-factory',
33 version=version,
34 description="Faker is a Python package that generates fake data for you.",
35 long_description=README + '\n\n' + NEWS,
36 scripts=['faker/bin/faker'],
37 classifiers=[
38 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
39 'Development Status :: 3 - Alpha',
40 'Environment :: Console',
41 'Intended Audience :: Developers',
42 'Programming Language :: Python',
43 'Programming Language :: Python :: 2',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.4',
46 'Topic :: Software Development :: Libraries :: Python Modules',
47 'Topic :: Software Development :: Testing',
48 'Topic :: Utilities',
49 'License :: OSI Approved :: MIT License'
50 ],
51 keywords='faker fixtures data test mock generator',
52 author='joke2k',
53 author_email='[email protected]',
54 url='http://github.com/joke2k/faker',
55 license='MIT License',
56 packages=find_packages(exclude=['*.tests']),
57 platforms=["any"],
58 test_suite='faker.tests',
59 zip_safe=zip_safe,
60 install_requires=install_requires
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,6 @@
import os
import io
-import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
@@ -13,11 +12,6 @@
version = '0.5.3'
-install_requires = []
-if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or
- (sys.version_info[0] == 3 and sys.version_info[1] < 1)):
- install_requires.append('importlib')
-
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
@@ -33,7 +27,9 @@
version=version,
description="Faker is a Python package that generates fake data for you.",
long_description=README + '\n\n' + NEWS,
- scripts=['faker/bin/faker'],
+ entry_points={
+ 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
+ },
classifiers=[
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
@@ -57,5 +53,8 @@
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
- install_requires=install_requires
+ extras_require={
+ ':python_version=="2.6"': ['importlib'],
+ ':python_version=="3.0"': ['importlib'],
+ }
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,6 @@\n \n import os\n import io\n-import sys\n from setuptools import setup, find_packages\n \n here = os.path.abspath(os.path.dirname(__file__))\n@@ -13,11 +12,6 @@\n \n version = '0.5.3'\n \n-install_requires = []\n-if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or\n- (sys.version_info[0] == 3 and sys.version_info[1] < 1)):\n- install_requires.append('importlib')\n-\n # this module can be zip-safe if the zipimporter implements iter_modules or if\n # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\n try:\n@@ -33,7 +27,9 @@\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n- scripts=['faker/bin/faker'],\n+ entry_points={\n+ 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n+ },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n@@ -57,5 +53,8 @@\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n- install_requires=install_requires\n+ extras_require={\n+ ':python_version==\"2.6\"': ['importlib'],\n+ ':python_version==\"3.0\"': ['importlib'],\n+ }\n )\n", "issue": "use environment markers and reactivate universal wheels + distribute wheels\nright now pip install is broken due to the released package claiming to be universal wheel but not using environment markers\n\nso pip makes a wheel for all pythons with the first python its run on, then its used on all other pythons,\nso a pip install on python2.6 would create a broken wheel with the extra dependency which is then subsequently tried in other pythons, and will utterly break them\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\nimport sys\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\nNEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.5.3'\n\ninstall_requires = []\nif ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or\n (sys.version_info[0] == 3 and sys.version_info[1] < 1)):\n install_requires.append('importlib')\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n scripts=['faker/bin/faker'],\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock 
generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=install_requires\n)\n", "path": "setup.py"}]} | 1,258 | 373 |
gh_patches_debug_8279 | rasdani/github-patches | git_diff | microsoft__ptvsd-759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create release version of ptvsd
Create a release version of ptvsd version 4.1.1
- [x] Change development status to production from alpha
- [x] Set version to 4.1.1
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See LICENSE in the project root
5 # for license information.
6
7 import os
8 import os.path
9 import subprocess
10 import sys
11
12 from setuptools import setup
13
14 import versioneer
15 import ptvsd
16 import ptvsd._vendored
17
18
19 PYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')
20 PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
21
22
23 def cython_build():
24 print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
25 subprocess.call([
26 sys.executable,
27 os.path.join(PYDEVD_ROOT, 'setup_cython.py'),
28 'build_ext',
29 '-i',
30 ])
31
32
33 def iter_vendored_files():
34 # Add pydevd files as data files for this package. They are not
35 # treated as a package of their own, because we don't actually
36 # want to provide pydevd - just use our own copy internally.
37 for project in ptvsd._vendored.list_all():
38 for filename in ptvsd._vendored.iter_packaging_files(project):
39 yield filename
40
41
42 with open('DESCRIPTION.md', 'r') as fh:
43 long_description = fh.read()
44
45
46 if __name__ == '__main__':
47 if not os.getenv('SKIP_CYTHON_BUILD'):
48 cython_build()
49
50 setup(
51 name='ptvsd',
52 version=versioneer.get_version(),
53 description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
54 long_description=long_description,
55 long_description_content_type='text/markdown',
56 license='MIT',
57 author='Microsoft Corporation',
58 author_email='[email protected]',
59 url='https://aka.ms/ptvs',
60 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
61 classifiers=[
62 'Development Status :: 3 - Alpha',
63 'Programming Language :: Python :: 2.7',
64 'Programming Language :: Python :: 3.4',
65 'Programming Language :: Python :: 3.5',
66 'Programming Language :: Python :: 3.6',
67 'Programming Language :: Python :: 3.7',
68 'Topic :: Software Development :: Debuggers',
69 'Operating System :: OS Independent',
70 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',
71 'License :: OSI Approved :: MIT License',
72 ],
73 packages=[
74 'ptvsd',
75 'ptvsd._vendored',
76 ],
77 package_data={
78 'ptvsd': ['ThirdPartyNotices.txt'],
79 'ptvsd._vendored': list(iter_vendored_files()),
80 },
81 cmdclass=versioneer.get_cmdclass(),
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
url='https://aka.ms/ptvs',
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=[
- 'Development Status :: 3 - Alpha',
+ 'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n classifiers=[\n- 'Development Status :: 3 - Alpha',\n+ 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n", "issue": "Create release version of ptvsd\nCreate a release version of ptvsd version 4.1.1\r\n- [x] Change development status to production from alpha\r\n- [x] Set version to 4.1.1\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nwith open('DESCRIPTION.md', 'r') as fh:\n long_description = fh.read()\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Debuggers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}]} | 1,372 | 137 |
gh_patches_debug_24618 | rasdani/github-patches | git_diff | airctic__icevision-660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fastai efficientdet fails on learn.validate() with AttributeError: 'NoneType' object has no attribute 'shape'
## 🐛 Bug
when trying to simply validate metrics for an efficientdet model with fastai
```python
KeyError: 'image_id'
```
```python
AttributeError: 'NoneType' object has no attribute 'shape'
```
it fails when trying to read the batch size automatically: in `accumulate, find_bs`
```python
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
```
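
(Aside, not part of the original report: for EfficientDet the targets in `learn.yb` arrive as a dict of per-image tensors, so `find_bs` cannot infer the batch size from them. A minimal sketch of a dict-aware replacement is shown below; the `"cls"` key and the `first`/`AvgLoss` imports are assumptions about the fastai/icevision target layout, not a confirmed fix.)

```python
# Hypothetical sketch: read the batch size from the "cls" entry of the
# EfficientDet target dict instead of relying on find_bs().
from fastai.learner import AvgLoss
from fastcore.basics import first

class EffDetAvgLoss(AvgLoss):
    def accumulate(self, learn):
        bs = len(first(learn.yb)["cls"])          # assumed target layout
        self.total += learn.to_detach(learn.loss.mean()) * bs
        self.count += bs
```
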
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://colab.research.google.com/drive/1i4aXYu4wIKA7eLUK86GwTm7lq7zku_oF?usp=sharing
</issue>
<code>
[start of icevision/models/torchvision/fastai/learner.py]
1 __all__ = ["RCNNCallback", "rcnn_learner"]
2
3 from icevision.imports import *
4 from icevision.engines.fastai import *
5 from icevision.models.torchvision.loss_fn import loss_fn
6 from icevision.models.torchvision.fastai.callbacks import *
7
8
9 def noop_watch(models, criterion=None, log="gradients", log_freq=1000, idx=None):
10 return []
11
12
13 def rcnn_learner(
14 dls: List[Union[DataLoader, fastai.DataLoader]],
15 model: nn.Module,
16 cbs=None,
17 **kwargs,
18 ):
19 learn = adapted_fastai_learner(
20 dls=dls,
21 model=model,
22 cbs=cbs,
23 loss_func=loss_fn,
24 **kwargs,
25 )
26
27 # HACK: patch AvgLoss (in original, find_bs gives errors)
28 class RCNNAvgLoss(fastai.AvgLoss):
29 def accumulate(self, learn):
30 bs = len(learn.yb)
31 self.total += fastai.to_detach(learn.loss.mean()) * bs
32 self.count += bs
33
34 recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
35 recorder.loss = RCNNAvgLoss()
36
37 is_wandb = [cb for cb in learn.cbs if "WandbCallback" in str(type(cb))]
38 if len(is_wandb) == 1:
39 logger.warning("Wandb quickfix implemented, for more info check issue #527")
40 wandb.watch = noop_watch
41 if len(is_wandb) > 1:
42 raise ValueError(
43 f"It seems you are passing {len(is_wandb)} `WandbCallback` instances to the `learner`. Only 1 is allowed."
44 )
45
46 return learn
47
[end of icevision/models/torchvision/fastai/learner.py]
[start of icevision/models/ross/efficientdet/fastai/learner.py]
1 __all__ = ["learner"]
2
3 from icevision.imports import *
4 from icevision.engines.fastai import *
5 from icevision.models.ross.efficientdet.loss_fn import loss_fn
6 from icevision.models.ross.efficientdet.fastai.callbacks import EfficientDetCallback
7
8
9 def learner(
10 dls: List[Union[DataLoader, fastai.DataLoader]],
11 model: nn.Module,
12 cbs=None,
13 **learner_kwargs,
14 ):
15 """Fastai `Learner` adapted for EfficientDet.
16
17 # Arguments
18 dls: `Sequence` of `DataLoaders` passed to the `Learner`.
19 The first one will be used for training and the second for validation.
20 model: The model to train.
21 cbs: Optional `Sequence` of callbacks.
22 **learner_kwargs: Keyword arguments that will be internally passed to `Learner`.
23
24 # Returns
25 A fastai `Learner`.
26 """
27 cbs = [EfficientDetCallback()] + L(cbs)
28
29 learn = adapted_fastai_learner(
30 dls=dls,
31 model=model,
32 cbs=cbs,
33 loss_func=loss_fn,
34 **learner_kwargs,
35 )
36
37 # HACK: patch AvgLoss (in original, find_bs gives errors)
38 class PatchedAvgLoss(fastai.AvgLoss):
39 def accumulate(self, learn):
40 bs = len(learn.yb)
41 self.total += fastai.to_detach(learn.loss.mean()) * bs
42 self.count += bs
43
44 recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
45 recorder.loss = PatchedAvgLoss()
46
47 return learn
48
[end of icevision/models/ross/efficientdet/fastai/learner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/ross/efficientdet/fastai/learner.py b/icevision/models/ross/efficientdet/fastai/learner.py
--- a/icevision/models/ross/efficientdet/fastai/learner.py
+++ b/icevision/models/ross/efficientdet/fastai/learner.py
@@ -34,14 +34,14 @@
**learner_kwargs,
)
- # HACK: patch AvgLoss (in original, find_bs gives errors)
- class PatchedAvgLoss(fastai.AvgLoss):
+ # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)
+ class EffDetAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
- self.total += fastai.to_detach(learn.loss.mean()) * bs
+ bs = len(first(learn.yb)["cls"])
+ self.total += learn.to_detach(learn.loss.mean()) * bs
self.count += bs
recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
- recorder.loss = PatchedAvgLoss()
+ recorder.loss = EffDetAvgLoss()
return learn
diff --git a/icevision/models/torchvision/fastai/learner.py b/icevision/models/torchvision/fastai/learner.py
--- a/icevision/models/torchvision/fastai/learner.py
+++ b/icevision/models/torchvision/fastai/learner.py
@@ -27,7 +27,7 @@
# HACK: patch AvgLoss (in original, find_bs gives errors)
class RCNNAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
+ bs = len(first(learn.yb))
self.total += fastai.to_detach(learn.loss.mean()) * bs
self.count += bs
| {"golden_diff": "diff --git a/icevision/models/ross/efficientdet/fastai/learner.py b/icevision/models/ross/efficientdet/fastai/learner.py\n--- a/icevision/models/ross/efficientdet/fastai/learner.py\n+++ b/icevision/models/ross/efficientdet/fastai/learner.py\n@@ -34,14 +34,14 @@\n **learner_kwargs,\n )\n \n- # HACK: patch AvgLoss (in original, find_bs gives errors)\n- class PatchedAvgLoss(fastai.AvgLoss):\n+ # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)\n+ class EffDetAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n- bs = len(learn.yb)\n- self.total += fastai.to_detach(learn.loss.mean()) * bs\n+ bs = len(first(learn.yb)[\"cls\"])\n+ self.total += learn.to_detach(learn.loss.mean()) * bs\n self.count += bs\n \n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n- recorder.loss = PatchedAvgLoss()\n+ recorder.loss = EffDetAvgLoss()\n \n return learn\ndiff --git a/icevision/models/torchvision/fastai/learner.py b/icevision/models/torchvision/fastai/learner.py\n--- a/icevision/models/torchvision/fastai/learner.py\n+++ b/icevision/models/torchvision/fastai/learner.py\n@@ -27,7 +27,7 @@\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class RCNNAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n- bs = len(learn.yb)\n+ bs = len(first(learn.yb))\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n", "issue": "fastai efficientdet fails on learn.validate() with AttributeError: 'NoneType' object has no attribute 'shape'\n## \ud83d\udc1b Bug\r\nwhen trying to simply validate metrics for an efficientdet model with fastai \r\n```python\r\nKeyError: 'image_id'\r\n```\r\n```python\r\nAttributeError: 'NoneType' object has no attribute 'shape'\r\n```\r\n\r\nit fails when trying to read the batch size automatically: in `accumulate, find_bs`\r\n```python\r\nclass AvgLoss(Metric):\r\n \"Average the losses taking into account potential different batch sizes\"\r\n def reset(self): self.total,self.count = 0.,0\r\n def accumulate(self, learn):\r\n bs = find_bs(learn.yb)\r\n self.total += learn.to_detach(learn.loss.mean())*bs\r\n self.count += bs\r\n @property\r\n def value(self): return self.total/self.count if self.count != 0 else None\r\n @property\r\n def name(self): return \"loss\"\r\n```\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Go to https://colab.research.google.com/drive/1i4aXYu4wIKA7eLUK86GwTm7lq7zku_oF?usp=sharing\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"RCNNCallback\", \"rcnn_learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.torchvision.loss_fn import loss_fn\nfrom icevision.models.torchvision.fastai.callbacks import *\n\n\ndef noop_watch(models, criterion=None, log=\"gradients\", log_freq=1000, idx=None):\n return []\n\n\ndef rcnn_learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **kwargs,\n):\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class RCNNAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(learn.yb)\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = RCNNAvgLoss()\n\n is_wandb = [cb for cb in learn.cbs if \"WandbCallback\" in str(type(cb))]\n if len(is_wandb) == 1:\n logger.warning(\"Wandb quickfix implemented, for more info check issue #527\")\n wandb.watch = noop_watch\n if len(is_wandb) > 1:\n raise ValueError(\n f\"It seems you are passing {len(is_wandb)} `WandbCallback` instances to the `learner`. Only 1 is allowed.\"\n )\n\n return learn\n", "path": "icevision/models/torchvision/fastai/learner.py"}, {"content": "__all__ = [\"learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.ross.efficientdet.loss_fn import loss_fn\nfrom icevision.models.ross.efficientdet.fastai.callbacks import EfficientDetCallback\n\n\ndef learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **learner_kwargs,\n):\n \"\"\"Fastai `Learner` adapted for EfficientDet.\n\n # Arguments\n dls: `Sequence` of `DataLoaders` passed to the `Learner`.\n The first one will be used for training and the second for validation.\n model: The model to train.\n cbs: Optional `Sequence` of callbacks.\n **learner_kwargs: Keyword arguments that will be internally passed to `Learner`.\n\n # Returns\n A fastai `Learner`.\n \"\"\"\n cbs = [EfficientDetCallback()] + L(cbs)\n\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **learner_kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class PatchedAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(learn.yb)\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = PatchedAvgLoss()\n\n return learn\n", "path": "icevision/models/ross/efficientdet/fastai/learner.py"}]} | 1,775 | 429 |
gh_patches_debug_24853 | rasdani/github-patches | git_diff | getnikola__nikola-2188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a FS cache for Jinja to speed it up a bit
http://jinja.pocoo.org/docs/dev/api/#bytecode-cache
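
A minimal sketch of what using the filesystem bytecode cache could look like (the `cache/jinja` path and `templates` directory are assumptions for illustration; the real integration would go through Nikola's own cache folder handling):

```python
# Hypothetical sketch: compile templates once and reuse the cached bytecode
# across builds via Jinja2's FileSystemBytecodeCache.
import os
import jinja2

cache_folder = os.path.join('cache', 'jinja')        # assumed location
os.makedirs(cache_folder, exist_ok=True)

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(['templates'], encoding='utf-8'),
    bytecode_cache=jinja2.FileSystemBytecodeCache(cache_folder),
)
```
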
</issue>
<code>
[start of nikola/plugins/template/jinja.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2015 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27
28 """Jinja template handler."""
29
30 from __future__ import unicode_literals
31 import os
32 import json
33 from collections import deque
34 try:
35 import jinja2
36 from jinja2 import meta
37 except ImportError:
38 jinja2 = None # NOQA
39
40 from nikola.plugin_categories import TemplateSystem
41 from nikola.utils import makedirs, req_missing
42
43
44 class JinjaTemplates(TemplateSystem):
45 """Support for Jinja2 templates."""
46
47 name = "jinja"
48 lookup = None
49 dependency_cache = {}
50
51 def __init__(self):
52 """Initialize Jinja2 environment with extended set of filters."""
53 if jinja2 is None:
54 return
55 self.lookup = jinja2.Environment()
56 self.lookup.trim_blocks = True
57 self.lookup.lstrip_blocks = True
58 self.lookup.filters['tojson'] = json.dumps
59 self.lookup.globals['enumerate'] = enumerate
60 self.lookup.globals['isinstance'] = isinstance
61 self.lookup.globals['tuple'] = tuple
62
63 def set_directories(self, directories, cache_folder):
64 """Create a new template lookup with set directories."""
65 if jinja2 is None:
66 req_missing(['jinja2'], 'use this theme')
67 self.directories = directories
68 self.create_lookup()
69
70 def inject_directory(self, directory):
71 """Add a directory to the lookup and recreate it if it's not there yet."""
72 if directory not in self.directories:
73 self.directories.append(directory)
74 self.create_lookup()
75
76 def create_lookup(self):
77 """Create a template lookup."""
78 self.lookup.loader = jinja2.FileSystemLoader(self.directories,
79 encoding='utf-8')
80
81 def set_site(self, site):
82 """Set the Nikola site."""
83 self.site = site
84 self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])
85
86 def render_template(self, template_name, output_name, context):
87 """Render the template into output_name using context."""
88 if jinja2 is None:
89 req_missing(['jinja2'], 'use this theme')
90 template = self.lookup.get_template(template_name)
91 output = template.render(**context)
92 if output_name is not None:
93 makedirs(os.path.dirname(output_name))
94 with open(output_name, 'w+') as output:
95 output.write(output.encode('utf8'))
96 return output
97
98 def render_template_to_string(self, template, context):
99 """Render template to a string using context."""
100 return self.lookup.from_string(template).render(**context)
101
102 def template_deps(self, template_name):
103 """Generate list of dependencies for a template."""
104 # Cache the lists of dependencies for each template name.
105 if self.dependency_cache.get(template_name) is None:
106 # Use a breadth-first search to find all templates this one
107 # depends on.
108 queue = deque([template_name])
109 visited_templates = set([template_name])
110 deps = []
111 while len(queue) > 0:
112 curr = queue.popleft()
113 source, filename = self.lookup.loader.get_source(self.lookup,
114 curr)[:2]
115 deps.append(filename)
116 ast = self.lookup.parse(source)
117 dep_names = meta.find_referenced_templates(ast)
118 for dep_name in dep_names:
119 if (dep_name not in visited_templates and dep_name is not None):
120 visited_templates.add(dep_name)
121 queue.append(dep_name)
122 self.dependency_cache[template_name] = deps
123 return self.dependency_cache[template_name]
124
[end of nikola/plugins/template/jinja.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/template/jinja.py b/nikola/plugins/template/jinja.py
--- a/nikola/plugins/template/jinja.py
+++ b/nikola/plugins/template/jinja.py
@@ -52,18 +52,21 @@
"""Initialize Jinja2 environment with extended set of filters."""
if jinja2 is None:
return
- self.lookup = jinja2.Environment()
+
+ def set_directories(self, directories, cache_folder):
+ """Create a new template lookup with set directories."""
+ if jinja2 is None:
+ req_missing(['jinja2'], 'use this theme')
+ cache_folder = os.path.join(cache_folder, 'jinja')
+ makedirs(cache_folder)
+ cache = jinja2.FileSystemBytecodeCache(cache_folder)
+ self.lookup = jinja2.Environment(bytecode_cache=cache)
self.lookup.trim_blocks = True
self.lookup.lstrip_blocks = True
self.lookup.filters['tojson'] = json.dumps
self.lookup.globals['enumerate'] = enumerate
self.lookup.globals['isinstance'] = isinstance
self.lookup.globals['tuple'] = tuple
-
- def set_directories(self, directories, cache_folder):
- """Create a new template lookup with set directories."""
- if jinja2 is None:
- req_missing(['jinja2'], 'use this theme')
self.directories = directories
self.create_lookup()
| {"golden_diff": "diff --git a/nikola/plugins/template/jinja.py b/nikola/plugins/template/jinja.py\n--- a/nikola/plugins/template/jinja.py\n+++ b/nikola/plugins/template/jinja.py\n@@ -52,18 +52,21 @@\n \"\"\"Initialize Jinja2 environment with extended set of filters.\"\"\"\n if jinja2 is None:\n return\n- self.lookup = jinja2.Environment()\n+\n+ def set_directories(self, directories, cache_folder):\n+ \"\"\"Create a new template lookup with set directories.\"\"\"\n+ if jinja2 is None:\n+ req_missing(['jinja2'], 'use this theme')\n+ cache_folder = os.path.join(cache_folder, 'jinja')\n+ makedirs(cache_folder)\n+ cache = jinja2.FileSystemBytecodeCache(cache_folder)\n+ self.lookup = jinja2.Environment(bytecode_cache=cache)\n self.lookup.trim_blocks = True\n self.lookup.lstrip_blocks = True\n self.lookup.filters['tojson'] = json.dumps\n self.lookup.globals['enumerate'] = enumerate\n self.lookup.globals['isinstance'] = isinstance\n self.lookup.globals['tuple'] = tuple\n-\n- def set_directories(self, directories, cache_folder):\n- \"\"\"Create a new template lookup with set directories.\"\"\"\n- if jinja2 is None:\n- req_missing(['jinja2'], 'use this theme')\n self.directories = directories\n self.create_lookup()\n", "issue": "Use a FS cache for Jinja to speed it up a bit\nhttp://jinja.pocoo.org/docs/dev/api/#bytecode-cache\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"Jinja template handler.\"\"\"\n\nfrom __future__ import unicode_literals\nimport os\nimport json\nfrom collections import deque\ntry:\n import jinja2\n from jinja2 import meta\nexcept ImportError:\n jinja2 = None # NOQA\n\nfrom nikola.plugin_categories import TemplateSystem\nfrom nikola.utils import makedirs, req_missing\n\n\nclass JinjaTemplates(TemplateSystem):\n \"\"\"Support for Jinja2 templates.\"\"\"\n\n name = \"jinja\"\n lookup = None\n dependency_cache = {}\n\n def __init__(self):\n \"\"\"Initialize Jinja2 environment with extended set of filters.\"\"\"\n if jinja2 is None:\n return\n self.lookup = jinja2.Environment()\n self.lookup.trim_blocks = True\n self.lookup.lstrip_blocks = True\n self.lookup.filters['tojson'] = json.dumps\n self.lookup.globals['enumerate'] = enumerate\n self.lookup.globals['isinstance'] = isinstance\n self.lookup.globals['tuple'] = tuple\n\n def set_directories(self, directories, cache_folder):\n \"\"\"Create a new template lookup with set directories.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n self.directories = directories\n self.create_lookup()\n\n def inject_directory(self, directory):\n \"\"\"Add a directory to the lookup and recreate it if it's not there yet.\"\"\"\n if directory not in self.directories:\n self.directories.append(directory)\n self.create_lookup()\n\n def create_lookup(self):\n \"\"\"Create a template lookup.\"\"\"\n self.lookup.loader = jinja2.FileSystemLoader(self.directories,\n encoding='utf-8')\n\n def set_site(self, site):\n \"\"\"Set the Nikola site.\"\"\"\n self.site = site\n self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])\n\n def render_template(self, template_name, output_name, context):\n \"\"\"Render the template into output_name using context.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n template = self.lookup.get_template(template_name)\n output = template.render(**context)\n if output_name is not None:\n makedirs(os.path.dirname(output_name))\n with open(output_name, 'w+') as output:\n output.write(output.encode('utf8'))\n return output\n\n def render_template_to_string(self, template, context):\n \"\"\"Render template to a string using context.\"\"\"\n return self.lookup.from_string(template).render(**context)\n\n def template_deps(self, template_name):\n \"\"\"Generate list of dependencies for a template.\"\"\"\n # Cache the lists of dependencies for each template name.\n if self.dependency_cache.get(template_name) is None:\n # Use a breadth-first search to find all templates this one\n # depends on.\n queue = deque([template_name])\n visited_templates = set([template_name])\n deps = []\n while len(queue) > 0:\n curr = queue.popleft()\n source, filename = self.lookup.loader.get_source(self.lookup,\n curr)[:2]\n deps.append(filename)\n ast = self.lookup.parse(source)\n dep_names = meta.find_referenced_templates(ast)\n for dep_name in dep_names:\n if (dep_name not in visited_templates and dep_name is not None):\n visited_templates.add(dep_name)\n queue.append(dep_name)\n self.dependency_cache[template_name] = deps\n return self.dependency_cache[template_name]\n", "path": "nikola/plugins/template/jinja.py"}]} | 1,830 | 317 |
gh_patches_debug_9404 | rasdani/github-patches | git_diff | pyca__cryptography-3130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scrypt bounds checking
```
[11:23:58] <Alex_Gaynor> reaperhulk: what happens if you pass a non-even n?
[11:24:10] <Alex_Gaynor> Or a negative value for any of the params?
```
Presumably it will fail with an assertion error on return from the call to `EVP_PBE_scrypt`, but we shouldn't allow those types of errors.
cc @Ayrx.
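
For context, a minimal sketch of the kind of validation being asked for (the constraints follow the scrypt definition: `n` must be a power of two greater than 1, `r` and `p` at least 1; this is illustrative, not the actual patch):

```python
# Hypothetical sketch: reject invalid scrypt parameters up front instead of
# letting EVP_PBE_scrypt fail with an assertion.
def _check_scrypt_params(n, r, p):
    if n < 2 or (n & (n - 1)) != 0:
        raise ValueError("n must be greater than 1 and be a power of 2.")
    if r < 1:
        raise ValueError("r must be greater than or equal to 1.")
    if p < 1:
        raise ValueError("p must be greater than or equal to 1.")
```
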
</issue>
<code>
[start of src/cryptography/hazmat/primitives/kdf/scrypt.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography import utils
8 from cryptography.exceptions import (
9 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
10 )
11 from cryptography.hazmat.backends.interfaces import ScryptBackend
12 from cryptography.hazmat.primitives import constant_time
13 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
14
15
16 @utils.register_interface(KeyDerivationFunction)
17 class Scrypt(object):
18 def __init__(self, salt, length, n, r, p, backend):
19 if not isinstance(backend, ScryptBackend):
20 raise UnsupportedAlgorithm(
21 "Backend object does not implement ScryptBackend.",
22 _Reasons.BACKEND_MISSING_INTERFACE
23 )
24
25 self._length = length
26 if not isinstance(salt, bytes):
27 raise TypeError("salt must be bytes.")
28 self._used = False
29 self._salt = salt
30 self._n = n
31 self._r = r
32 self._p = p
33 self._backend = backend
34
35 def derive(self, key_material):
36 if self._used:
37 raise AlreadyFinalized("Scrypt instances can only be used once.")
38 self._used = True
39
40 if not isinstance(key_material, bytes):
41 raise TypeError("key_material must be bytes.")
42 return self._backend.derive_scrypt(
43 key_material, self._salt, self._length, self._n, self._r, self._p
44 )
45
46 def verify(self, key_material, expected_key):
47 derived_key = self.derive(key_material)
48 if not constant_time.bytes_eq(derived_key, expected_key):
49 raise InvalidKey("Keys do not match.")
50
[end of src/cryptography/hazmat/primitives/kdf/scrypt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/kdf/scrypt.py b/src/cryptography/hazmat/primitives/kdf/scrypt.py
--- a/src/cryptography/hazmat/primitives/kdf/scrypt.py
+++ b/src/cryptography/hazmat/primitives/kdf/scrypt.py
@@ -25,6 +25,16 @@
self._length = length
if not isinstance(salt, bytes):
raise TypeError("salt must be bytes.")
+
+ if n < 2 or (n & (n - 1)) != 0:
+ raise ValueError("n must be greater than 1 and be a power of 2.")
+
+ if r < 1:
+ raise ValueError("r must be greater than or equal to 1.")
+
+ if p < 1:
+ raise ValueError("p must be greater than or equal to 1.")
+
self._used = False
self._salt = salt
self._n = n
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/scrypt.py b/src/cryptography/hazmat/primitives/kdf/scrypt.py\n--- a/src/cryptography/hazmat/primitives/kdf/scrypt.py\n+++ b/src/cryptography/hazmat/primitives/kdf/scrypt.py\n@@ -25,6 +25,16 @@\n self._length = length\n if not isinstance(salt, bytes):\n raise TypeError(\"salt must be bytes.\")\n+\n+ if n < 2 or (n & (n - 1)) != 0:\n+ raise ValueError(\"n must be greater than 1 and be a power of 2.\")\n+\n+ if r < 1:\n+ raise ValueError(\"r must be greater than or equal to 1.\")\n+\n+ if p < 1:\n+ raise ValueError(\"p must be greater than or equal to 1.\")\n+\n self._used = False\n self._salt = salt\n self._n = n\n", "issue": "scrypt bounds checking\n```\n[11:23:58] <Alex_Gaynor> reaperhulk: what happens if you pass a non-even n?\n[11:24:10] <Alex_Gaynor> Or a negative value for any of the params?\n```\n\nPresumably it will fail with an assertion error on return from the call to `EVP_PBE_scrypt`, but we shouldn't allow those types of errors.\n\ncc @Ayrx.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import ScryptBackend\nfrom cryptography.hazmat.primitives import constant_time\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass Scrypt(object):\n def __init__(self, salt, length, n, r, p, backend):\n if not isinstance(backend, ScryptBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement ScryptBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._length = length\n if not isinstance(salt, bytes):\n raise TypeError(\"salt must be bytes.\")\n self._used = False\n self._salt = salt\n self._n = n\n self._r = r\n self._p = p\n self._backend = backend\n\n def derive(self, key_material):\n if self._used:\n raise AlreadyFinalized(\"Scrypt instances can only be used once.\")\n self._used = True\n\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n return self._backend.derive_scrypt(\n key_material, self._salt, self._length, self._n, self._r, self._p\n )\n\n def verify(self, key_material, expected_key):\n derived_key = self.derive(key_material)\n if not constant_time.bytes_eq(derived_key, expected_key):\n raise InvalidKey(\"Keys do not match.\")\n", "path": "src/cryptography/hazmat/primitives/kdf/scrypt.py"}]} | 1,154 | 217 |
gh_patches_debug_32814 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
skill container is broken
When the intent skill became the intent class, the skill container wasn't updated to match the new structure and is currently not working.
</issue>
<code>
[start of mycroft/skills/container.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import argparse
20 import sys
21 from os.path import dirname, exists, isdir
22
23 from mycroft.configuration import ConfigurationManager
24 from mycroft.messagebus.client.ws import WebsocketClient
25 from mycroft.skills.core import create_skill_descriptor, load_skill
26 from mycroft.skills.intent import create_skill as create_intent_skill
27 from mycroft.util.log import getLogger
28
29 __author__ = 'seanfitz'
30
31 LOG = getLogger("SkillContainer")
32
33
34 class SkillContainer(object):
35 def __init__(self, args):
36 params = self.__build_params(args)
37
38 if params.config:
39 ConfigurationManager.load_local([params.config])
40
41 if exists(params.lib) and isdir(params.lib):
42 sys.path.append(params.lib)
43
44 sys.path.append(params.dir)
45 self.dir = params.dir
46
47 self.enable_intent_skill = params.enable_intent_skill
48
49 self.__init_client(params)
50
51 @staticmethod
52 def __build_params(args):
53 parser = argparse.ArgumentParser()
54 parser.add_argument("--config", default="./mycroft.conf")
55 parser.add_argument("dir", nargs='?', default=dirname(__file__))
56 parser.add_argument("--lib", default="./lib")
57 parser.add_argument("--host", default=None)
58 parser.add_argument("--port", default=None)
59 parser.add_argument("--use-ssl", action='store_true', default=False)
60 parser.add_argument("--enable-intent-skill", action='store_true',
61 default=False)
62 return parser.parse_args(args)
63
64 def __init_client(self, params):
65 config = ConfigurationManager.get().get("websocket")
66
67 if not params.host:
68 params.host = config.get('host')
69 if not params.port:
70 params.port = config.get('port')
71
72 self.ws = WebsocketClient(host=params.host,
73 port=params.port,
74 ssl=params.use_ssl)
75
76 def load_skill(self):
77 if self.enable_intent_skill:
78 intent_skill = create_intent_skill()
79 intent_skill.bind(self.ws)
80 intent_skill.initialize()
81 skill_descriptor = create_skill_descriptor(self.dir)
82 self.skill = load_skill(skill_descriptor, self.ws)
83
84 def run(self):
85 try:
86 self.ws.on('message', LOG.debug)
87 self.ws.on('open', self.load_skill)
88 self.ws.on('error', LOG.error)
89 self.ws.run_forever()
90 except Exception as e:
91 LOG.error("Error: {0}".format(e))
92 self.stop()
93
94 def stop(self):
95 if self.skill:
96 self.skill.shutdown()
97
98
99 def main():
100 container = SkillContainer(sys.argv[1:])
101 try:
102 container.run()
103 except KeyboardInterrupt:
104 container.stop()
105 finally:
106 sys.exit()
107
108
109 if __name__ == "__main__":
110 main()
111
[end of mycroft/skills/container.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/skills/container.py b/mycroft/skills/container.py
--- a/mycroft/skills/container.py
+++ b/mycroft/skills/container.py
@@ -23,7 +23,7 @@
from mycroft.configuration import ConfigurationManager
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.skills.core import create_skill_descriptor, load_skill
-from mycroft.skills.intent import create_skill as create_intent_skill
+from mycroft.skills.intent import Intent
from mycroft.util.log import getLogger
__author__ = 'seanfitz'
@@ -44,7 +44,7 @@
sys.path.append(params.dir)
self.dir = params.dir
- self.enable_intent_skill = params.enable_intent_skill
+ self.enable_intent = params.enable_intent
self.__init_client(params)
@@ -57,7 +57,7 @@
parser.add_argument("--host", default=None)
parser.add_argument("--port", default=None)
parser.add_argument("--use-ssl", action='store_true', default=False)
- parser.add_argument("--enable-intent-skill", action='store_true',
+ parser.add_argument("--enable-intent", action='store_true',
default=False)
return parser.parse_args(args)
@@ -74,10 +74,9 @@
ssl=params.use_ssl)
def load_skill(self):
- if self.enable_intent_skill:
- intent_skill = create_intent_skill()
- intent_skill.bind(self.ws)
- intent_skill.initialize()
+ if self.enable_intent:
+ Intent(self.ws)
+
skill_descriptor = create_skill_descriptor(self.dir)
self.skill = load_skill(skill_descriptor, self.ws)
| {"golden_diff": "diff --git a/mycroft/skills/container.py b/mycroft/skills/container.py\n--- a/mycroft/skills/container.py\n+++ b/mycroft/skills/container.py\n@@ -23,7 +23,7 @@\n from mycroft.configuration import ConfigurationManager\n from mycroft.messagebus.client.ws import WebsocketClient\n from mycroft.skills.core import create_skill_descriptor, load_skill\n-from mycroft.skills.intent import create_skill as create_intent_skill\n+from mycroft.skills.intent import Intent\n from mycroft.util.log import getLogger\n \n __author__ = 'seanfitz'\n@@ -44,7 +44,7 @@\n sys.path.append(params.dir)\n self.dir = params.dir\n \n- self.enable_intent_skill = params.enable_intent_skill\n+ self.enable_intent = params.enable_intent\n \n self.__init_client(params)\n \n@@ -57,7 +57,7 @@\n parser.add_argument(\"--host\", default=None)\n parser.add_argument(\"--port\", default=None)\n parser.add_argument(\"--use-ssl\", action='store_true', default=False)\n- parser.add_argument(\"--enable-intent-skill\", action='store_true',\n+ parser.add_argument(\"--enable-intent\", action='store_true',\n default=False)\n return parser.parse_args(args)\n \n@@ -74,10 +74,9 @@\n ssl=params.use_ssl)\n \n def load_skill(self):\n- if self.enable_intent_skill:\n- intent_skill = create_intent_skill()\n- intent_skill.bind(self.ws)\n- intent_skill.initialize()\n+ if self.enable_intent:\n+ Intent(self.ws)\n+\n skill_descriptor = create_skill_descriptor(self.dir)\n self.skill = load_skill(skill_descriptor, self.ws)\n", "issue": "skill container is broken\nWhen the intent skill became the intent class the skill container wasn't updated to match the new structure and is currently not working.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport argparse\nimport sys\nfrom os.path import dirname, exists, isdir\n\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.skills.core import create_skill_descriptor, load_skill\nfrom mycroft.skills.intent import create_skill as create_intent_skill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nLOG = getLogger(\"SkillContainer\")\n\n\nclass SkillContainer(object):\n def __init__(self, args):\n params = self.__build_params(args)\n\n if params.config:\n ConfigurationManager.load_local([params.config])\n\n if exists(params.lib) and isdir(params.lib):\n sys.path.append(params.lib)\n\n sys.path.append(params.dir)\n self.dir = params.dir\n\n self.enable_intent_skill = params.enable_intent_skill\n\n self.__init_client(params)\n\n @staticmethod\n def __build_params(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", default=\"./mycroft.conf\")\n parser.add_argument(\"dir\", nargs='?', default=dirname(__file__))\n parser.add_argument(\"--lib\", default=\"./lib\")\n parser.add_argument(\"--host\", default=None)\n parser.add_argument(\"--port\", default=None)\n parser.add_argument(\"--use-ssl\", action='store_true', default=False)\n parser.add_argument(\"--enable-intent-skill\", action='store_true',\n default=False)\n return parser.parse_args(args)\n\n def __init_client(self, params):\n config = ConfigurationManager.get().get(\"websocket\")\n\n if not params.host:\n params.host = config.get('host')\n if not params.port:\n params.port = config.get('port')\n\n self.ws = WebsocketClient(host=params.host,\n port=params.port,\n ssl=params.use_ssl)\n\n def load_skill(self):\n if self.enable_intent_skill:\n intent_skill = create_intent_skill()\n intent_skill.bind(self.ws)\n intent_skill.initialize()\n skill_descriptor = create_skill_descriptor(self.dir)\n self.skill = load_skill(skill_descriptor, self.ws)\n\n def run(self):\n try:\n self.ws.on('message', LOG.debug)\n self.ws.on('open', self.load_skill)\n self.ws.on('error', LOG.error)\n self.ws.run_forever()\n except Exception as e:\n LOG.error(\"Error: {0}\".format(e))\n self.stop()\n\n def stop(self):\n if self.skill:\n self.skill.shutdown()\n\n\ndef main():\n container = SkillContainer(sys.argv[1:])\n try:\n container.run()\n except KeyboardInterrupt:\n container.stop()\n finally:\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/skills/container.py"}]} | 1,534 | 363 |
gh_patches_debug_25137 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1072 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HDF5Exporter: throws error when curves aren't the same length
When trying to save data from a graph as an hdf5 file, the HDF5Exporter throws an error when you have multiple curves with differing lengths. This looks to be because the numpy.array(data).astype('double') can't handle lists with different lengths. Below is a traceback from the error. This occurs when trying to save data from the "Multiple curves" graph in the "Basic Plotting" example.
````
Traceback (most recent call last):
File "/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/Exporter.py", line 77, in fileSaveFinished
self.export(fileName=fileName, **self.fileDialog.opts)
File "/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/HDF5Exporter.py", line 55, in export
fdata = numpy.array(data).astype('double')
ValueError: setting an array element with a sequence.
````
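
The underlying NumPy behaviour is easy to reproduce on its own (a minimal sketch; the exact error text depends on the NumPy version):

```python
# Hypothetical sketch: two curves of different lengths, as produced by the
# "Multiple curves" example, cannot be packed into one rectangular array.
import numpy

data = [[0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0],   # curve 1: x, y
        [0, 1, 2], [0.0, 1.0, 4.0]]            # curve 2: x, y (shorter)
numpy.array(data).astype('double')  # raises ValueError; older NumPy builds an
                                    # object array first and fails in astype()
```
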
</issue>
<code>
[start of pyqtgraph/exporters/HDF5Exporter.py]
1 from ..Qt import QtGui, QtCore
2 from .Exporter import Exporter
3 from ..parametertree import Parameter
4 from .. import PlotItem
5
6 import numpy
7 try:
8 import h5py
9 HAVE_HDF5 = True
10 except ImportError:
11 HAVE_HDF5 = False
12
13 __all__ = ['HDF5Exporter']
14
15
16 class HDF5Exporter(Exporter):
17 Name = "HDF5 Export: plot (x,y)"
18 windows = []
19 allowCopy = False
20
21 def __init__(self, item):
22 Exporter.__init__(self, item)
23 self.params = Parameter(name='params', type='group', children=[
24 {'name': 'Name', 'type': 'str', 'value': 'Export',},
25 {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},
26 ])
27
28 def parameters(self):
29 return self.params
30
31 def export(self, fileName=None):
32 if not HAVE_HDF5:
33 raise RuntimeError("This exporter requires the h5py package, "
34 "but it was not importable.")
35
36 if not isinstance(self.item, PlotItem):
37 raise Exception("Must have a PlotItem selected for HDF5 export.")
38
39 if fileName is None:
40 self.fileSaveDialog(filter=["*.h5", "*.hdf", "*.hd5"])
41 return
42 dsname = self.params['Name']
43 fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to "delete/overwrite"
44 data = []
45
46 appendAllX = self.params['columnMode'] == '(x,y) per plot'
47 #print dir(self.item.curves[0])
48 tlen = 0
49 for i, c in enumerate(self.item.curves):
50 d = c.getData()
51 if i > 0 and len(d[0]) != tlen:
52 raise ValueError ("HDF5 Export requires all curves in plot to have same length")
53 if appendAllX or i == 0:
54 data.append(d[0])
55 tlen = len(d[0])
56 data.append(d[1])
57
58
59 fdata = numpy.array(data).astype('double')
60 dset = fd.create_dataset(dsname, data=fdata)
61 fd.close()
62
63 if HAVE_HDF5:
64 HDF5Exporter.register()
65
[end of pyqtgraph/exporters/HDF5Exporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/exporters/HDF5Exporter.py b/pyqtgraph/exporters/HDF5Exporter.py
--- a/pyqtgraph/exporters/HDF5Exporter.py
+++ b/pyqtgraph/exporters/HDF5Exporter.py
@@ -44,20 +44,27 @@
data = []
appendAllX = self.params['columnMode'] == '(x,y) per plot'
- #print dir(self.item.curves[0])
- tlen = 0
- for i, c in enumerate(self.item.curves):
- d = c.getData()
- if i > 0 and len(d[0]) != tlen:
- raise ValueError ("HDF5 Export requires all curves in plot to have same length")
- if appendAllX or i == 0:
- data.append(d[0])
- tlen = len(d[0])
- data.append(d[1])
+ # Check if the arrays are ragged
+ len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None
+ ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)
+ if ragged:
+ dgroup = fd.create_group(dsname)
+ for i, c in enumerate(self.item.curves):
+ d = c.getData()
+ fdata = numpy.array([d[0], d[1]]).astype('double')
+ cname = c.name() if c.name() is not None else str(i)
+ dset = dgroup.create_dataset(cname, data=fdata)
+ else:
+ for i, c in enumerate(self.item.curves):
+ d = c.getData()
+ if appendAllX or i == 0:
+ data.append(d[0])
+ data.append(d[1])
+
+ fdata = numpy.array(data).astype('double')
+ dset = fd.create_dataset(dsname, data=fdata)
- fdata = numpy.array(data).astype('double')
- dset = fd.create_dataset(dsname, data=fdata)
fd.close()
if HAVE_HDF5:
| {"golden_diff": "diff --git a/pyqtgraph/exporters/HDF5Exporter.py b/pyqtgraph/exporters/HDF5Exporter.py\n--- a/pyqtgraph/exporters/HDF5Exporter.py\n+++ b/pyqtgraph/exporters/HDF5Exporter.py\n@@ -44,20 +44,27 @@\n data = []\n \n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n- #print dir(self.item.curves[0])\n- tlen = 0\n- for i, c in enumerate(self.item.curves):\n- d = c.getData()\n- if i > 0 and len(d[0]) != tlen:\n- raise ValueError (\"HDF5 Export requires all curves in plot to have same length\")\n- if appendAllX or i == 0:\n- data.append(d[0])\n- tlen = len(d[0])\n- data.append(d[1])\n+ # Check if the arrays are ragged\n+ len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None\n+ ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)\n \n+ if ragged:\n+ dgroup = fd.create_group(dsname)\n+ for i, c in enumerate(self.item.curves):\n+ d = c.getData()\n+ fdata = numpy.array([d[0], d[1]]).astype('double')\n+ cname = c.name() if c.name() is not None else str(i)\n+ dset = dgroup.create_dataset(cname, data=fdata)\n+ else:\n+ for i, c in enumerate(self.item.curves):\n+ d = c.getData()\n+ if appendAllX or i == 0:\n+ data.append(d[0])\n+ data.append(d[1])\n+\n+ fdata = numpy.array(data).astype('double')\n+ dset = fd.create_dataset(dsname, data=fdata)\n \n- fdata = numpy.array(data).astype('double')\n- dset = fd.create_dataset(dsname, data=fdata)\n fd.close()\n \n if HAVE_HDF5:\n", "issue": "HDF5Exporter: throws error when curves aren't the same length\nWhen trying to save data from a graph as an hdf5 file, the HDF5Exporter throws and error when you have multiple curves with differing lengths. This looks to be because the numpy.array(data).astype('double') can't handle lists with different lengths. Below is a traceback from the error. This occurs when trying to save data from the \"Multiple curves\" graph in the \"Basic Plotting\" example.\r\n\r\n````\r\nTraceback (most recent call last):\r\n File \"/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/Exporter.py\", line 77, in fileSaveFinished\r\n self.export(fileName=fileName, **self.fileDialog.opts)\r\n File \"/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/HDF5Exporter.py\", line 55, in export\r\n fdata = numpy.array(data).astype('double')\r\nValueError: setting an array element with a sequence.\r\n````\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. 
import PlotItem\n\nimport numpy \ntry:\n import h5py\n HAVE_HDF5 = True\nexcept ImportError:\n HAVE_HDF5 = False\n \n__all__ = ['HDF5Exporter']\n\n \nclass HDF5Exporter(Exporter):\n Name = \"HDF5 Export: plot (x,y)\"\n windows = []\n allowCopy = False\n\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'Name', 'type': 'str', 'value': 'Export',},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n if not HAVE_HDF5:\n raise RuntimeError(\"This exporter requires the h5py package, \"\n \"but it was not importable.\")\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for HDF5 export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.h5\", \"*.hdf\", \"*.hd5\"])\n return\n dsname = self.params['Name']\n fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to \"delete/overwrite\"\n data = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n #print dir(self.item.curves[0])\n tlen = 0\n for i, c in enumerate(self.item.curves):\n d = c.getData()\n if i > 0 and len(d[0]) != tlen:\n raise ValueError (\"HDF5 Export requires all curves in plot to have same length\")\n if appendAllX or i == 0:\n data.append(d[0])\n tlen = len(d[0])\n data.append(d[1])\n\n\n fdata = numpy.array(data).astype('double')\n dset = fd.create_dataset(dsname, data=fdata)\n fd.close()\n\nif HAVE_HDF5:\n HDF5Exporter.register()\n", "path": "pyqtgraph/exporters/HDF5Exporter.py"}]} | 1,402 | 481 |
gh_patches_debug_7270 | rasdani/github-patches | git_diff | pantsbuild__pants-18551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Failed to write to remote cache` when running `ruff`
**Describe the bug**
When running `pants lint --only=ruff ::` I see many `WARN` logs like:
```
14:18:48.60 [WARN] Failed to write to remote cache (1 occurrences so far): Declared output directory path RelativePath("src/django_apps/preventive_care/consults/automatic_refer_out/__init__.py") in output digest Digest { hash: Fingerprint<b4c4a7e44c3d23b8eac247bfdd5ed723d054947915bbfb808e0ee16e4fa75430>, size_bytes: 77 } contained a file instead.
```
**Pants version**
`PANTS_SHA=254f69b3f111fb620206bbfe72b262520849484f` (on the 2.16.x branch)
**OS**
MacOS
</issue>
<code>
[start of src/python/pants/backend/python/lint/ruff/rules.py]
1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from pants.backend.python.lint.ruff.subsystem import Ruff, RuffFieldSet
7 from pants.backend.python.util_rules import pex
8 from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
9 from pants.core.goals.fix import FixResult, FixTargetsRequest
10 from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
11 from pants.core.util_rules.partitions import PartitionerType
12 from pants.engine.fs import Digest, MergeDigests
13 from pants.engine.process import FallibleProcessResult
14 from pants.engine.rules import Get, MultiGet, collect_rules, rule
15 from pants.util.logging import LogLevel
16 from pants.util.strutil import pluralize
17
18
19 class RuffRequest(FixTargetsRequest):
20 field_set_type = RuffFieldSet
21 tool_subsystem = Ruff
22 partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
23
24
25 @rule(desc="Fix with ruff", level=LogLevel.DEBUG)
26 async def ruff_fix(request: RuffRequest.Batch, ruff: Ruff) -> FixResult:
27 ruff_pex_get = Get(VenvPex, PexRequest, ruff.to_pex_request())
28
29 config_files_get = Get(
30 ConfigFiles, ConfigFilesRequest, ruff.config_request(request.snapshot.dirs)
31 )
32
33 ruff_pex, config_files = await MultiGet(ruff_pex_get, config_files_get)
34
35 input_digest = await Get(
36 Digest,
37 MergeDigests((request.snapshot.digest, config_files.snapshot.digest)),
38 )
39
40 conf_args = [f"--config={ruff.config}"] if ruff.config else []
41
42 result = await Get(
43 FallibleProcessResult,
44 VenvPexProcess(
45 ruff_pex,
46 argv=("--fix", *conf_args, *ruff.args, *request.files),
47 input_digest=input_digest,
48 output_directories=request.files,
49 description=f"Run ruff on {pluralize(len(request.elements), 'file')}.",
50 level=LogLevel.DEBUG,
51 ),
52 )
53 return await FixResult.create(request, result, strip_chroot_path=True)
54
55
56 def rules():
57 return [*collect_rules(), *RuffRequest.rules(), *pex.rules()]
58
[end of src/python/pants/backend/python/lint/ruff/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/lint/ruff/rules.py b/src/python/pants/backend/python/lint/ruff/rules.py
--- a/src/python/pants/backend/python/lint/ruff/rules.py
+++ b/src/python/pants/backend/python/lint/ruff/rules.py
@@ -45,7 +45,7 @@
ruff_pex,
argv=("--fix", *conf_args, *ruff.args, *request.files),
input_digest=input_digest,
- output_directories=request.files,
+ output_files=request.files,
description=f"Run ruff on {pluralize(len(request.elements), 'file')}.",
level=LogLevel.DEBUG,
),
| {"golden_diff": "diff --git a/src/python/pants/backend/python/lint/ruff/rules.py b/src/python/pants/backend/python/lint/ruff/rules.py\n--- a/src/python/pants/backend/python/lint/ruff/rules.py\n+++ b/src/python/pants/backend/python/lint/ruff/rules.py\n@@ -45,7 +45,7 @@\n ruff_pex,\n argv=(\"--fix\", *conf_args, *ruff.args, *request.files),\n input_digest=input_digest,\n- output_directories=request.files,\n+ output_files=request.files,\n description=f\"Run ruff on {pluralize(len(request.elements), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n", "issue": "`Failed to write to remote cache` when running `ruff`\n**Describe the bug**\r\n\r\nWhen running `pants lint --only=ruff ::` I see many `WARN` logs like:\r\n```\r\n14:18:48.60 [WARN] Failed to write to remote cache (1 occurrences so far): Declared output directory path RelativePath(\"src/django_apps/preventive_care/consults/automatic_refer_out/__init__.py\") in output digest Digest { hash: Fingerprint<b4c4a7e44c3d23b8eac247bfdd5ed723d054947915bbfb808e0ee16e4fa75430>, size_bytes: 77 } contained a file instead.\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=254f69b3f111fb620206bbfe72b262520849484f` (on the 2.16.x branch)\r\n\r\n**OS**\r\n\r\nMacOS\n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom pants.backend.python.lint.ruff.subsystem import Ruff, RuffFieldSet\nfrom pants.backend.python.util_rules import pex\nfrom pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess\nfrom pants.core.goals.fix import FixResult, FixTargetsRequest\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import PartitionerType\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass RuffRequest(FixTargetsRequest):\n field_set_type = RuffFieldSet\n tool_subsystem = Ruff\n partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION\n\n\n@rule(desc=\"Fix with ruff\", level=LogLevel.DEBUG)\nasync def ruff_fix(request: RuffRequest.Batch, ruff: Ruff) -> FixResult:\n ruff_pex_get = Get(VenvPex, PexRequest, ruff.to_pex_request())\n\n config_files_get = Get(\n ConfigFiles, ConfigFilesRequest, ruff.config_request(request.snapshot.dirs)\n )\n\n ruff_pex, config_files = await MultiGet(ruff_pex_get, config_files_get)\n\n input_digest = await Get(\n Digest,\n MergeDigests((request.snapshot.digest, config_files.snapshot.digest)),\n )\n\n conf_args = [f\"--config={ruff.config}\"] if ruff.config else []\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n ruff_pex,\n argv=(\"--fix\", *conf_args, *ruff.args, *request.files),\n input_digest=input_digest,\n output_directories=request.files,\n description=f\"Run ruff on {pluralize(len(request.elements), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n return await FixResult.create(request, result, strip_chroot_path=True)\n\n\ndef rules():\n return [*collect_rules(), *RuffRequest.rules(), *pex.rules()]\n", "path": "src/python/pants/backend/python/lint/ruff/rules.py"}]} | 1,414 | 147 |
gh_patches_debug_3369 | rasdani/github-patches | git_diff | svthalia__concrexit-1925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annual documents does not show drop-down menu for year
### Describe the bug
When I try to add an annual document to the site, I am not able to select a year, the dropdown menu does not contain any entries.
### Steps to reproduce
1.
</issue>
<code>
[start of website/documents/forms.py]
1 """The forms defined by the documents package."""
2 from django import forms
3 from django.contrib import admin
4 from django.forms import widgets
5 from django.utils import timezone
6
7 from documents import models
8 from utils.snippets import datetime_to_lectureyear
9
10
11 class DocumentFileInput(widgets.ClearableFileInput):
12 """Wrapper around Django's :class:`~django.forms.widgets.ClearableFileInput`.
13
14 It overrides the URL of the associated file when it is fetched.
15 """
16
17 template_name = "widgets/clearable_file_input.html"
18
19 def get_context(self, name, value, attrs):
20 context = super().get_context(name, value, attrs)
21 if hasattr(value, "url"):
22 doc = models.Document.objects.get(file=value)
23 context["document_id"] = doc.pk
24 context["language"] = "en"
25 return context
26
27
28 class MinutesForm(forms.ModelForm):
29 """Form that overrides the widgets for the files."""
30
31 class Meta:
32 model = models.Minutes
33 fields = (
34 "file",
35 "members_only",
36 )
37 widgets = {
38 "file": DocumentFileInput,
39 }
40
41
42 class AnnualDocumentForm(forms.ModelForm):
43 """Form that provides custom functionality for annual documents."""
44
45 class Meta:
46 model = models.AnnualDocument
47 fields = "__all__"
48 widgets = {
49 "year": forms.Select,
50 "file": DocumentFileInput,
51 }
52
53 @staticmethod
54 def _current_year():
55 """Get the current lecture year."""
56 return datetime_to_lectureyear(timezone.now())
57
58 @staticmethod
59 def _year_choices():
60 """Get the lecture years."""
61 current = datetime_to_lectureyear(timezone.now())
62 return [
63 (year, "{}-{}".format(year, year + 1))
64 for year in range(current + 1, 1989, -1)
65 ]
66
67
68 class AssociationDocumentForm(forms.ModelForm):
69 """Form that overrides the widgets for the files."""
70
71 class Meta:
72 model = models.AssociationDocument
73 fields = (
74 "name",
75 "file",
76 "members_only",
77 )
78 widgets = {
79 "file": DocumentFileInput,
80 }
81
82
83 class EventDocumentForm(forms.ModelForm):
84 """Form that overrides the widgets for the files."""
85
86 class Meta:
87 model = models.EventDocument
88 fields = (
89 "name",
90 "file",
91 "members_only",
92 "owner",
93 )
94 widgets = {
95 "file": DocumentFileInput,
96 }
97
98
99 class MiscellaneousDocumentForm(forms.ModelForm):
100 """Form that overrides the widgets for the files."""
101
102 class Meta:
103 model = models.MiscellaneousDocument
104 fields = (
105 "name",
106 "file",
107 "members_only",
108 )
109 widgets = {
110 "file": DocumentFileInput,
111 }
112
113
114 class GeneralMeetingForm(forms.ModelForm):
115 """Custom form for general meetings with a custom widget for documents."""
116
117 class Meta:
118 model = models.GeneralMeeting
119 fields = "__all__"
120 widgets = {
121 "documents": admin.widgets.FilteredSelectMultiple(
122 "documents", is_stacked=False
123 )
124 }
125
[end of website/documents/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/documents/forms.py b/website/documents/forms.py
--- a/website/documents/forms.py
+++ b/website/documents/forms.py
@@ -64,6 +64,10 @@
for year in range(current + 1, 1989, -1)
]
+ year = forms.TypedChoiceField(
+ coerce=int, choices=_year_choices.__func__, initial=_current_year.__func__
+ )
+
class AssociationDocumentForm(forms.ModelForm):
"""Form that overrides the widgets for the files."""
| {"golden_diff": "diff --git a/website/documents/forms.py b/website/documents/forms.py\n--- a/website/documents/forms.py\n+++ b/website/documents/forms.py\n@@ -64,6 +64,10 @@\n for year in range(current + 1, 1989, -1)\n ]\n \n+ year = forms.TypedChoiceField(\n+ coerce=int, choices=_year_choices.__func__, initial=_current_year.__func__\n+ )\n+\n \n class AssociationDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n", "issue": "Annual documents does not show drop-down menu for year\n### Describe the bug\r\n\r\nWhen I try to add an annual document to the site, I am not able to select a year, the dropdown menu does not contain any entries. \r\n\r\n### Steps to reproduce\r\n\r\n1. \n", "before_files": [{"content": "\"\"\"The forms defined by the documents package.\"\"\"\nfrom django import forms\nfrom django.contrib import admin\nfrom django.forms import widgets\nfrom django.utils import timezone\n\nfrom documents import models\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentFileInput(widgets.ClearableFileInput):\n \"\"\"Wrapper around Django's :class:`~django.forms.widgets.ClearableFileInput`.\n\n It overrides the URL of the associated file when it is fetched.\n \"\"\"\n\n template_name = \"widgets/clearable_file_input.html\"\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if hasattr(value, \"url\"):\n doc = models.Document.objects.get(file=value)\n context[\"document_id\"] = doc.pk\n context[\"language\"] = \"en\"\n return context\n\n\nclass MinutesForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.Minutes\n fields = (\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass AnnualDocumentForm(forms.ModelForm):\n \"\"\"Form that provides custom functionality for annual documents.\"\"\"\n\n class Meta:\n model = models.AnnualDocument\n fields = \"__all__\"\n widgets = {\n \"year\": forms.Select,\n \"file\": DocumentFileInput,\n }\n\n @staticmethod\n def _current_year():\n \"\"\"Get the current lecture year.\"\"\"\n return datetime_to_lectureyear(timezone.now())\n\n @staticmethod\n def _year_choices():\n \"\"\"Get the lecture years.\"\"\"\n current = datetime_to_lectureyear(timezone.now())\n return [\n (year, \"{}-{}\".format(year, year + 1))\n for year in range(current + 1, 1989, -1)\n ]\n\n\nclass AssociationDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.AssociationDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass EventDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.EventDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n \"owner\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass MiscellaneousDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.MiscellaneousDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass GeneralMeetingForm(forms.ModelForm):\n \"\"\"Custom form for general meetings with a custom widget for documents.\"\"\"\n\n class Meta:\n model = models.GeneralMeeting\n fields = \"__all__\"\n widgets = {\n \"documents\": 
admin.widgets.FilteredSelectMultiple(\n \"documents\", is_stacked=False\n )\n }\n", "path": "website/documents/forms.py"}]} | 1,526 | 119 |
gh_patches_debug_28224 | rasdani/github-patches | git_diff | Parsl__parsl-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
confusing error message with simple configs
Using this config:
```
config = Config(executors=[ThreadPoolExecutor()])
```
parsl startup logs this message to the logger:
```
$ ./c.py
2018-07-11 08:04:42 parsl.config:66 [DEBUG] Checkpoint period only has an effect with checkpoint_mode='periodic'
```
This appears to be in config creation, as it happens even without passing that config to create a DataFlowKernel.
This might cause user confusion that something is broken/misconfigured.
</issue>
<code>
[start of parsl/config.py]
1 import logging
2
3 from libsubmit.utils import RepresentationMixin
4 from parsl.executors.threads import ThreadPoolExecutor
5 from parsl.dataflow.error import ConfigurationError
6
7 logger = logging.getLogger(__name__)
8
9
10 class Config(RepresentationMixin):
11 """
12 Specification of Parsl configuration options.
13
14 Parameters
15 ----------
16 executors : list of ParslExecutor, optional
17 List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,
18 :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default
19 is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].
20 app_cache : bool, optional
21 Enable app caching. Default is True.
22 checkpoint_files : list of str, optional
23 List of paths to checkpoint files. Default is None.
24 checkpoint_mode : str, optional
25 Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to
26 `None`, checkpointing will be disabled. Default is None.
27 checkpoint_period : str, optional
28 Time interval (in "HH:MM:SS") at which to checkpoint completed tasks. Only has an effect if
29 `checkpoint_mode='periodic'`.
30 data_management_max_threads : int, optional
31 Maximum number of threads to allocate for the data manager to use for managing input and output transfers.
32 Default is 10.
33 lazy_errors : bool, optional
34 If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will
35 be raised as soon as the task returns. Default is True.
36 retries : int, optional
37 Set the number of retries in case of failure. Default is 0.
38 run_dir : str, optional
39 Path to run directory. Default is 'runinfo'.
40 strategy : str, optional
41 Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic
42 scaling will be disabled. Default is 'simple'.
43 usage_tracking : bool, optional
44 Enable usage tracking. Default is True.
45 """
46 def __init__(self,
47 executors=None,
48 app_cache=True,
49 checkpoint_files=None,
50 checkpoint_mode=None,
51 checkpoint_period="00:30:00",
52 data_management_max_threads=10,
53 lazy_errors=True,
54 retries=0,
55 run_dir='runinfo',
56 strategy='simple',
57 db_logger_config=None,
58 usage_tracking=True):
59 if executors is None:
60 executors = [ThreadPoolExecutor()]
61 self.executors = executors
62 self.app_cache = app_cache
63 self.checkpoint_files = checkpoint_files
64 self.checkpoint_mode = checkpoint_mode
65 if checkpoint_mode is not 'periodic' and checkpoint_period is not None:
66 logger.debug("Checkpoint period only has an effect with checkpoint_mode='periodic'")
67 self.checkpoint_period = checkpoint_period
68 self.data_management_max_threads = data_management_max_threads
69 self.lazy_errors = lazy_errors
70 self.retries = retries
71 self.run_dir = run_dir
72 self.strategy = strategy
73 self.usage_tracking = usage_tracking
74 self.db_logger_config = db_logger_config
75
76 @property
77 def executors(self):
78 return self._executors
79
80 @executors.setter
81 def executors(self, executors):
82 labels = [e.label for e in executors]
83 duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]
84 if len(duplicates) > 0:
85 raise ConfigurationError('Executors must have unique labels ({})'.format(
86 ', '.join(['label={}'.format(repr(d)) for d in duplicates])))
87 self._executors = executors
88
[end of parsl/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/config.py b/parsl/config.py
--- a/parsl/config.py
+++ b/parsl/config.py
@@ -48,7 +48,7 @@
app_cache=True,
checkpoint_files=None,
checkpoint_mode=None,
- checkpoint_period="00:30:00",
+ checkpoint_period=None,
data_management_max_threads=10,
lazy_errors=True,
retries=0,
@@ -62,8 +62,17 @@
self.app_cache = app_cache
self.checkpoint_files = checkpoint_files
self.checkpoint_mode = checkpoint_mode
- if checkpoint_mode is not 'periodic' and checkpoint_period is not None:
- logger.debug("Checkpoint period only has an effect with checkpoint_mode='periodic'")
+ if checkpoint_period is not None:
+ if checkpoint_mode is None:
+ logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(
+ checkpoint_period)
+ )
+ elif checkpoint_mode is not 'periodic':
+ logger.debug("Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'".format(
+ checkpoint_period)
+ )
+ if checkpoint_mode is 'periodic' and checkpoint_period is None:
+ checkpoint_period = "00:30:00"
self.checkpoint_period = checkpoint_period
self.data_management_max_threads = data_management_max_threads
self.lazy_errors = lazy_errors
| {"golden_diff": "diff --git a/parsl/config.py b/parsl/config.py\n--- a/parsl/config.py\n+++ b/parsl/config.py\n@@ -48,7 +48,7 @@\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n- checkpoint_period=\"00:30:00\",\n+ checkpoint_period=None,\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n@@ -62,8 +62,17 @@\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n- if checkpoint_mode is not 'periodic' and checkpoint_period is not None:\n- logger.debug(\"Checkpoint period only has an effect with checkpoint_mode='periodic'\")\n+ if checkpoint_period is not None:\n+ if checkpoint_mode is None:\n+ logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(\n+ checkpoint_period)\n+ )\n+ elif checkpoint_mode is not 'periodic':\n+ logger.debug(\"Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'\".format(\n+ checkpoint_period)\n+ )\n+ if checkpoint_mode is 'periodic' and checkpoint_period is None:\n+ checkpoint_period = \"00:30:00\"\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n", "issue": "confusing error message with simple configs\nUsing this config:\r\n\r\n```\r\nconfig = Config(executors=[ThreadPoolExecutor()])\r\n```\r\n\r\nparsl startup logs this message to the logger:\r\n\r\n```\r\n$ ./c.py \r\n2018-07-11 08:04:42 parsl.config:66 [DEBUG] Checkpoint period only has an effect with checkpoint_mode='periodic'\r\n```\r\n\r\nThis appears to be in config creation, as it happens even without passing that config to create a DataFlowKernel.\r\n\r\nThis might cause user confusion that something is broken/misconfigured.\r\n\n", "before_files": [{"content": "import logging\n\nfrom libsubmit.utils import RepresentationMixin\nfrom parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.dataflow.error import ConfigurationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(RepresentationMixin):\n \"\"\"\n Specification of Parsl configuration options.\n\n Parameters\n ----------\n executors : list of ParslExecutor, optional\n List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,\n :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default\n is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].\n app_cache : bool, optional\n Enable app caching. Default is True.\n checkpoint_files : list of str, optional\n List of paths to checkpoint files. Default is None.\n checkpoint_mode : str, optional\n Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to\n `None`, checkpointing will be disabled. Default is None.\n checkpoint_period : str, optional\n Time interval (in \"HH:MM:SS\") at which to checkpoint completed tasks. Only has an effect if\n `checkpoint_mode='periodic'`.\n data_management_max_threads : int, optional\n Maximum number of threads to allocate for the data manager to use for managing input and output transfers.\n Default is 10.\n lazy_errors : bool, optional\n If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will\n be raised as soon as the task returns. Default is True.\n retries : int, optional\n Set the number of retries in case of failure. Default is 0.\n run_dir : str, optional\n Path to run directory. 
Default is 'runinfo'.\n strategy : str, optional\n Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic\n scaling will be disabled. Default is 'simple'.\n usage_tracking : bool, optional\n Enable usage tracking. Default is True.\n \"\"\"\n def __init__(self,\n executors=None,\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n checkpoint_period=\"00:30:00\",\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n run_dir='runinfo',\n strategy='simple',\n db_logger_config=None,\n usage_tracking=True):\n if executors is None:\n executors = [ThreadPoolExecutor()]\n self.executors = executors\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n if checkpoint_mode is not 'periodic' and checkpoint_period is not None:\n logger.debug(\"Checkpoint period only has an effect with checkpoint_mode='periodic'\")\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n self.retries = retries\n self.run_dir = run_dir\n self.strategy = strategy\n self.usage_tracking = usage_tracking\n self.db_logger_config = db_logger_config\n\n @property\n def executors(self):\n return self._executors\n\n @executors.setter\n def executors(self, executors):\n labels = [e.label for e in executors]\n duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]\n if len(duplicates) > 0:\n raise ConfigurationError('Executors must have unique labels ({})'.format(\n ', '.join(['label={}'.format(repr(d)) for d in duplicates])))\n self._executors = executors\n", "path": "parsl/config.py"}]} | 1,650 | 327 |
gh_patches_debug_576 | rasdani/github-patches | git_diff | pex-tool__pex-975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.10
On the docket:
+ [x] Improve Pex packaging. (#961)
+ [x] Make the interpreter cache deterministic. (#960)
+ [x] Fix deprecation warning for `rU` mode (#956)
+ [x] Fix runtime resolve error message generation. (#955)
+ [x] Kill dead code. (#954)
+ [x] Many Pex tests fail under Python 2.7 in CI #967
+ [x] Add a `--local` mode for packaging the Pex PEX. #971
+ [x] Split Pex resolve API. (#970)
+ [x] Can't run PEX file when a dependency's wheel includes a build tag #964
+ [x] Expose network configuration in pex options. #803
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.9'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.9'
+__version__ = '2.1.10'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.9'\n+__version__ = '2.1.10'\n", "issue": "Release 2.1.10\nOn the docket:\r\n+ [x] Improve Pex packaging. (#961)\r\n+ [x] Make the interpreter cache deterministic. (#960)\r\n+ [x] Fix deprecation warning for `rU` mode (#956)\r\n+ [x] Fix runtime resolve error message generation. (#955)\r\n+ [x] Kill dead code. (#954)\r\n+ [x] Many Pex tests fail under Python 2.7 in CI #967\r\n+ [x] Add a `--local` mode for packaging the Pex PEX. #971\r\n+ [x] Split Pex resolve API. (#970)\r\n+ [x] Can't run PEX file when a dependency's wheel includes a build tag #964\r\n+ [x] Expose network configuration in pex options. #803\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.9'\n", "path": "pex/version.py"}]} | 777 | 96 |
gh_patches_debug_2746 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BERT classifier doesn't work under distributed_train
The default tokenization is re, I think it's building the dictionary along the way...
**Logs**
Please paste the command line output:
```
ValueError: Dictionaries should be pre-built before distributed train.
ValueError: Dictionaries should be pre-built before distributed train.
```
</issue>
<code>
[start of parlai/agents/bert_ranker/bert_dictionary.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 from parlai.core.dict import DictionaryAgent
7 from parlai.zoo.bert.build import download
8 from parlai.utils.misc import warn_once
9
10 try:
11 from pytorch_pretrained_bert import BertTokenizer
12 except ImportError:
13 raise ImportError(
14 'BERT rankers needs pytorch-pretrained-BERT installed. \n '
15 'pip install pytorch-pretrained-bert'
16 )
17 from .helpers import VOCAB_PATH
18
19 import os
20
21
22 class BertDictionaryAgent(DictionaryAgent):
23 """
24 Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.
25 """
26
27 def __init__(self, opt):
28 super().__init__(opt)
29 # initialize from vocab path
30 warn_once(
31 'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'
32 )
33 download(opt['datapath'])
34 vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)
35 self.tokenizer = BertTokenizer.from_pretrained(vocab_path)
36
37 self.start_token = '[CLS]'
38 self.end_token = '[SEP]'
39 self.null_token = '[PAD]'
40 self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[
41 0
42 ] # should be 101
43 self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[
44 0
45 ] # should be 102
46 self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0] # should be 0
47 # set tok2ind for special tokens
48 self.tok2ind[self.start_token] = self.start_idx
49 self.tok2ind[self.end_token] = self.end_idx
50 self.tok2ind[self.null_token] = self.pad_idx
51 # set ind2tok for special tokens
52 self.ind2tok[self.start_idx] = self.start_token
53 self.ind2tok[self.end_idx] = self.end_token
54 self.ind2tok[self.pad_idx] = self.null_token
55
56 def txt2vec(self, text, vec_type=list):
57 tokens = self.tokenizer.tokenize(text)
58 tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)
59 return tokens_id
60
61 def vec2txt(self, vec):
62 if not isinstance(vec, list):
63 # assume tensor
64 idxs = [idx.item() for idx in vec.cpu()]
65 else:
66 idxs = vec
67 toks = self.tokenizer.convert_ids_to_tokens(idxs)
68 return ' '.join(toks)
69
70 def act(self):
71 return {}
72
[end of parlai/agents/bert_ranker/bert_dictionary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/agents/bert_ranker/bert_dictionary.py b/parlai/agents/bert_ranker/bert_dictionary.py
--- a/parlai/agents/bert_ranker/bert_dictionary.py
+++ b/parlai/agents/bert_ranker/bert_dictionary.py
@@ -24,6 +24,9 @@
Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.
"""
+ def is_prebuit(self):
+ return True
+
def __init__(self, opt):
super().__init__(opt)
# initialize from vocab path
| {"golden_diff": "diff --git a/parlai/agents/bert_ranker/bert_dictionary.py b/parlai/agents/bert_ranker/bert_dictionary.py\n--- a/parlai/agents/bert_ranker/bert_dictionary.py\n+++ b/parlai/agents/bert_ranker/bert_dictionary.py\n@@ -24,6 +24,9 @@\n Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.\n \"\"\"\n \n+ def is_prebuit(self):\n+ return True\n+\n def __init__(self, opt):\n super().__init__(opt)\n # initialize from vocab path\n", "issue": "BERT classifier doesn't work under distributed_train\nThe default tokenization is re, I think it's building the dictionary along the way...\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nValueError: Dictionaries should be pre-built before distributed train.\r\nValueError: Dictionaries should be pre-built before distributed train.\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.zoo.bert.build import download\nfrom parlai.utils.misc import warn_once\n\ntry:\n from pytorch_pretrained_bert import BertTokenizer\nexcept ImportError:\n raise ImportError(\n 'BERT rankers needs pytorch-pretrained-BERT installed. \\n '\n 'pip install pytorch-pretrained-bert'\n )\nfrom .helpers import VOCAB_PATH\n\nimport os\n\n\nclass BertDictionaryAgent(DictionaryAgent):\n \"\"\"\n Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.\n \"\"\"\n\n def __init__(self, opt):\n super().__init__(opt)\n # initialize from vocab path\n warn_once(\n 'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'\n )\n download(opt['datapath'])\n vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)\n self.tokenizer = BertTokenizer.from_pretrained(vocab_path)\n\n self.start_token = '[CLS]'\n self.end_token = '[SEP]'\n self.null_token = '[PAD]'\n self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[\n 0\n ] # should be 101\n self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[\n 0\n ] # should be 102\n self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0] # should be 0\n # set tok2ind for special tokens\n self.tok2ind[self.start_token] = self.start_idx\n self.tok2ind[self.end_token] = self.end_idx\n self.tok2ind[self.null_token] = self.pad_idx\n # set ind2tok for special tokens\n self.ind2tok[self.start_idx] = self.start_token\n self.ind2tok[self.end_idx] = self.end_token\n self.ind2tok[self.pad_idx] = self.null_token\n\n def txt2vec(self, text, vec_type=list):\n tokens = self.tokenizer.tokenize(text)\n tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)\n return tokens_id\n\n def vec2txt(self, vec):\n if not isinstance(vec, list):\n # assume tensor\n idxs = [idx.item() for idx in vec.cpu()]\n else:\n idxs = vec\n toks = self.tokenizer.convert_ids_to_tokens(idxs)\n return ' '.join(toks)\n\n def act(self):\n return {}\n", "path": "parlai/agents/bert_ranker/bert_dictionary.py"}]} | 1,362 | 139 |
gh_patches_debug_17982 | rasdani/github-patches | git_diff | apluslms__a-plus-820 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add separate button for Aalto login (aside Haka login)
In a recent change (pull request #804 ) Aalto (or other local organization) login button was replaced by general Haka login, that directs user to organization selector, to allow login also using other Haka organization accounts than Aalto. This was an intermediate step (due to difficulties in shibboleth configuration), and a separate button for local organization login should now be added back, as majority of students would be using it, and usually some additional guidance may need to be added, e.g. for open university students to use local organization account instead of other organization.
</issue>
<code>
[start of aplus/urls.py]
1 from django.conf import settings
2 from django.conf.urls import url, include
3 from django.contrib import admin
4 from django.contrib.sitemaps.views import sitemap
5 from django.urls import path
6
7 import shibboleth_login.urls
8 import social_django.urls
9 import userprofile.urls, userprofile.sitemaps
10 import course.urls, course.long_urls, course.sitemaps
11 import exercise.urls, exercise.sitemaps
12 import edit_course.urls
13 import deviations.urls
14 import notification.urls
15 import external_services.urls
16 import news.urls
17 import diploma.urls
18 import apps.urls
19 import api.urls_v2
20 import redirect_old_urls.urls
21
22
23 admin.autodiscover()
24
25 all_sitemaps = {
26 **course.sitemaps.all_sitemaps,
27 **exercise.sitemaps.all_sitemaps,
28 **userprofile.sitemaps.all_sitemaps,
29 }
30
31 # Pay attention to the order the URL patterns will be matched!
32 urlpatterns = [
33 url(r'^admin/', admin.site.urls),
34 url(r'^shibboleth/', include(shibboleth_login.urls)),
35 url('', include(social_django.urls, namespace='social')),
36 url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md
37 url(r'^accounts/', include(userprofile.urls)),
38 url(r'^diploma/', include(diploma.urls)),
39 url(r'^', include(redirect_old_urls.urls)),
40 url(r'^', include(apps.urls)),
41 url(r'^', include(news.urls)),
42 url(r'^', include(external_services.urls)),
43 url(r'^', include(course.long_urls)),
44 url(r'^', include(deviations.urls)),
45 url(r'^', include(edit_course.urls)),
46 url(r'^', include(notification.urls)),
47 url(r'^', include(exercise.urls)),
48 url(r'^', include(course.urls)),
49 path('sitemap.xml', sitemap, { 'sitemaps': all_sitemaps },
50 name='django.contrib.sitemaps.views.sitemap'),
51 ]
52
53 if settings.DEBUG:
54 import django.views.static
55 urlpatterns.insert(0, url(r'^media/(?P<path>.*)$', django.views.static.serve,
56 { 'document_root': settings.MEDIA_ROOT }))
57
[end of aplus/urls.py]
[start of shibboleth_login/urls.py]
1 from django.conf import settings
2 from django.conf.urls import url
3
4 from . import views
5
6
7 urlpatterns = [
8 url(r'^login/$', views.login, name="shibboleth-login"),
9 ]
10
11 if settings.DEBUG:
12 urlpatterns.append(url(r'^debug/$', views.debug))
13
[end of shibboleth_login/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aplus/urls.py b/aplus/urls.py
--- a/aplus/urls.py
+++ b/aplus/urls.py
@@ -31,7 +31,7 @@
# Pay attention to the order the URL patterns will be matched!
urlpatterns = [
url(r'^admin/', admin.site.urls),
- url(r'^shibboleth/', include(shibboleth_login.urls)),
+ url(r'^', include(shibboleth_login.urls)),
url('', include(social_django.urls, namespace='social')),
url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md
url(r'^accounts/', include(userprofile.urls)),
diff --git a/shibboleth_login/urls.py b/shibboleth_login/urls.py
--- a/shibboleth_login/urls.py
+++ b/shibboleth_login/urls.py
@@ -5,7 +5,8 @@
urlpatterns = [
- url(r'^login/$', views.login, name="shibboleth-login"),
+ url(r'^shibboleth/login/$', views.login, name="shibboleth-login"),
+ url(r'^Shibboleth.sso/haka_login$', views.login, name="haka-login"),
]
if settings.DEBUG:
| {"golden_diff": "diff --git a/aplus/urls.py b/aplus/urls.py\n--- a/aplus/urls.py\n+++ b/aplus/urls.py\n@@ -31,7 +31,7 @@\n # Pay attention to the order the URL patterns will be matched!\n urlpatterns = [\n url(r'^admin/', admin.site.urls),\n- url(r'^shibboleth/', include(shibboleth_login.urls)),\n+ url(r'^', include(shibboleth_login.urls)),\n url('', include(social_django.urls, namespace='social')),\n url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md\n url(r'^accounts/', include(userprofile.urls)),\ndiff --git a/shibboleth_login/urls.py b/shibboleth_login/urls.py\n--- a/shibboleth_login/urls.py\n+++ b/shibboleth_login/urls.py\n@@ -5,7 +5,8 @@\n \n \n urlpatterns = [\n- url(r'^login/$', views.login, name=\"shibboleth-login\"),\n+ url(r'^shibboleth/login/$', views.login, name=\"shibboleth-login\"),\n+ url(r'^Shibboleth.sso/haka_login$', views.login, name=\"haka-login\"),\n ]\n \n if settings.DEBUG:\n", "issue": "Add separate button for Aalto login (aside Haka login)\nIn a recent change (pull request #804 ) Aalto (or other local organization) login button was replaced by general Haka login, that directs user to organization selector, to allow login also using other Haka organization accounts than Aalto. This was an intermediate step (due to difficulties in shibboleth configuration), and a separate button for local organization login should now be added back, as majority of students would be using it, and usually some additional guidance may need to be added, e.g. for open university students to use local organization account instead of other organization.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import path\n\nimport shibboleth_login.urls\nimport social_django.urls\nimport userprofile.urls, userprofile.sitemaps\nimport course.urls, course.long_urls, course.sitemaps\nimport exercise.urls, exercise.sitemaps\nimport edit_course.urls\nimport deviations.urls\nimport notification.urls\nimport external_services.urls\nimport news.urls\nimport diploma.urls\nimport apps.urls\nimport api.urls_v2\nimport redirect_old_urls.urls\n\n\nadmin.autodiscover()\n\nall_sitemaps = {\n **course.sitemaps.all_sitemaps,\n **exercise.sitemaps.all_sitemaps,\n **userprofile.sitemaps.all_sitemaps,\n}\n\n# Pay attention to the order the URL patterns will be matched!\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^shibboleth/', include(shibboleth_login.urls)),\n url('', include(social_django.urls, namespace='social')),\n url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? 
doc/api_versioning.md\n url(r'^accounts/', include(userprofile.urls)),\n url(r'^diploma/', include(diploma.urls)),\n url(r'^', include(redirect_old_urls.urls)),\n url(r'^', include(apps.urls)),\n url(r'^', include(news.urls)),\n url(r'^', include(external_services.urls)),\n url(r'^', include(course.long_urls)),\n url(r'^', include(deviations.urls)),\n url(r'^', include(edit_course.urls)),\n url(r'^', include(notification.urls)),\n url(r'^', include(exercise.urls)),\n url(r'^', include(course.urls)),\n path('sitemap.xml', sitemap, { 'sitemaps': all_sitemaps },\n name='django.contrib.sitemaps.views.sitemap'),\n]\n\nif settings.DEBUG:\n import django.views.static\n urlpatterns.insert(0, url(r'^media/(?P<path>.*)$', django.views.static.serve,\n { 'document_root': settings.MEDIA_ROOT }))\n", "path": "aplus/urls.py"}, {"content": "from django.conf import settings\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^login/$', views.login, name=\"shibboleth-login\"),\n]\n\nif settings.DEBUG:\n urlpatterns.append(url(r'^debug/$', views.debug))\n", "path": "shibboleth_login/urls.py"}]} | 1,340 | 289 |
gh_patches_debug_2281 | rasdani/github-patches | git_diff | rasterio__rasterio-438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
polygonize method no longer in use / use shapes instead?
Hi,
just going through the examples folders.
If I ran the python interpreter on `poligonize.py` it complains about a missing attribute:
```
(venv)[] ~/Progetti/pyhton-usergroup/rasterio <master> ✗ python examples/polygonize.py
sys:1: FutureWarning: read_band() is deprecated and will be removed by Rasterio 1.0. Please use read() instead.
Traceback (most recent call last):
File "examples/polygonize.py", line 10, in <module>
list(ftrz.polygonize(image)))
AttributeError: 'module' object has no attribute 'polygonize'
```
But, going trough the commits history w/ @ligthyear it seems like instead of poligonize, one should use shapes.
If I ran it in fact with the new method it works smoothly
```
pprint.pprint(
list(ftrz.shapes(image)))
```
Cheers
</issue>
<code>
[start of examples/polygonize.py]
1 import pprint
2
3 import rasterio
4 import rasterio._features as ftrz
5
6 with rasterio.open('box.png') as src:
7 image = src.read_band(1)
8
9 pprint.pprint(
10 list(ftrz.polygonize(image)))
11
[end of examples/polygonize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/polygonize.py b/examples/polygonize.py
--- a/examples/polygonize.py
+++ b/examples/polygonize.py
@@ -1,10 +1,12 @@
import pprint
import rasterio
-import rasterio._features as ftrz
+from rasterio.features import shapes
-with rasterio.open('box.png') as src:
- image = src.read_band(1)
+with rasterio.open('tests/data/shade.tif') as src:
+ image = src.read(1)
+# Print the first two shapes...
pprint.pprint(
- list(ftrz.polygonize(image)))
+ list(shapes(image))[:2]
+)
| {"golden_diff": "diff --git a/examples/polygonize.py b/examples/polygonize.py\n--- a/examples/polygonize.py\n+++ b/examples/polygonize.py\n@@ -1,10 +1,12 @@\n import pprint\n \n import rasterio\n-import rasterio._features as ftrz\n+from rasterio.features import shapes\n \n-with rasterio.open('box.png') as src:\n- image = src.read_band(1)\n+with rasterio.open('tests/data/shade.tif') as src:\n+ image = src.read(1)\n \n+# Print the first two shapes...\n pprint.pprint(\n- list(ftrz.polygonize(image)))\n+ list(shapes(image))[:2]\n+)\n", "issue": "polygonize method no longer in use / use shapes instead?\nHi,\n\njust going through the examples folders.\nIf I ran the python interpreter on `poligonize.py` it complains about a missing attribute:\n\n```\n(venv)[] ~/Progetti/pyhton-usergroup/rasterio <master> \u2717 python examples/polygonize.py\nsys:1: FutureWarning: read_band() is deprecated and will be removed by Rasterio 1.0. Please use read() instead.\nTraceback (most recent call last):\n File \"examples/polygonize.py\", line 10, in <module>\n list(ftrz.polygonize(image)))\nAttributeError: 'module' object has no attribute 'polygonize'\n```\n\nBut, going trough the commits history w/ @ligthyear it seems like instead of poligonize, one should use shapes. \nIf I ran it in fact with the new method it works smoothly\n\n```\npprint.pprint(\n list(ftrz.shapes(image)))\n```\n\nCheers\n\n", "before_files": [{"content": "import pprint\n\nimport rasterio\nimport rasterio._features as ftrz\n\nwith rasterio.open('box.png') as src:\n image = src.read_band(1)\n\npprint.pprint(\n list(ftrz.polygonize(image)))\n", "path": "examples/polygonize.py"}]} | 817 | 150 |
gh_patches_debug_2475 | rasdani/github-patches | git_diff | Gallopsled__pwntools-597 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A little bug in Buffer class
There is a litttle bug in pwnlib.tubes.Buffer class.The class method unget has a type error in line 117.add a buffer and a list
```
Traceback (most recent call last):
File "<input>", line 1, in <module>
a.unget(b)
File "buffer.py", line 117, in unget
self.data = data + self.data
TypeError: unsupported operand type(s) for +: 'Buffer' and 'list'
```
</issue>
<code>
[start of pwnlib/tubes/buffer.py]
1 #!/usr/bin/env python2
2
3 class Buffer(Exception):
4 """
5 List of strings with some helper routines.
6
7 Example:
8
9 >>> b = Buffer()
10 >>> b.add("A" * 10)
11 >>> b.add("B" * 10)
12 >>> len(b)
13 20
14 >>> b.get(1)
15 'A'
16 >>> len(b)
17 19
18 >>> b.get(9999)
19 'AAAAAAAAABBBBBBBBBB'
20 >>> len(b)
21 0
22 >>> b.get(1)
23 ''
24
25 Implementation Details:
26
27 Implemented as a list. Strings are added onto the end.
28 The ``0th`` item in the buffer is the oldest item, and
29 will be received first.
30 """
31 def __init__(self):
32 self.data = [] # Buffer
33 self.size = 0 # Length
34
35
36 def __len__(self):
37 """
38 >>> b = Buffer()
39 >>> b.add('lol')
40 >>> len(b) == 3
41 True
42 >>> b.add('foobar')
43 >>> len(b) == 9
44 True
45 """
46 return self.size
47
48 def __nonzero__(self):
49 return len(self) > 0
50
51 def __contains__(self, x):
52 """
53 >>> b = Buffer()
54 >>> b.add('asdf')
55 >>> 'x' in b
56 False
57 >>> b.add('x')
58 >>> 'x' in b
59 True
60 """
61 for b in self.data:
62 if x in b:
63 return True
64 return False
65
66 def index(self, x):
67 """
68 >>> b = Buffer()
69 >>> b.add('asdf')
70 >>> b.add('qwert')
71 >>> b.index('t') == len(b) - 1
72 True
73 """
74 sofar = 0
75 for b in self.data:
76 if x in b:
77 return sofar + b.index(x)
78 sofar += len(b)
79 raise IndexError()
80
81 def add(self, data):
82 """
83 Adds data to the buffer.
84
85 Arguments:
86 data(str,Buffer): Data to add
87 """
88 # Fast path for ''
89 if not data: return
90
91 if isinstance(data, Buffer):
92 self.size += data.size
93 self.data += data.data
94 else:
95 self.size += len(data)
96 self.data.append(data)
97
98 def unget(self, data):
99 """
100 Places data at the front of the buffer.
101
102 Arguments:
103 data(str,Buffer): Data to place at the beginning of the buffer.
104
105 Example:
106
107 >>> b = Buffer()
108 >>> b.add("hello")
109 >>> b.add("world")
110 >>> b.get(5)
111 'hello'
112 >>> b.unget("goodbye")
113 >>> b.get()
114 'goodbyeworld'
115 """
116 if isinstance(data, Buffer):
117 self.data = data + self.data
118 self.size += data.size
119 else:
120 self.data.insert(0, data)
121 self.size += len(data)
122
123 def get(self, want=float('inf')):
124 """
125 Retrieves bytes from the buffer.
126
127 Arguments:
128 want(int): Maximum number of bytes to fetch
129
130 Returns:
131 Data as string
132
133 Example:
134
135 >>> b = Buffer()
136 >>> b.add('hello')
137 >>> b.add('world')
138 >>> b.get(1)
139 'h'
140 >>> b.get()
141 'elloworld'
142 """
143 # Fast path, get all of the data
144 if want >= self.size:
145 data = ''.join(self.data)
146 self.size = 0
147 self.data = []
148 return data
149
150 # Slow path, find the correct-index chunk
151 have = 0
152 i = 0
153 while want >= have:
154 have += len(self.data[i])
155 i += 1
156
157 # Join the chunks, evict from the buffer
158 data = ''.join(self.data[:i])
159 self.data = self.data[i:]
160
161 # If the last chunk puts us over the limit,
162 # stick the extra back at the beginning.
163 if have > want:
164 extra = data[want:]
165 data = data[:want]
166 self.data.insert(0, extra)
167
168 # Size update
169 self.size -= len(data)
170
171 return data
172
[end of pwnlib/tubes/buffer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/tubes/buffer.py b/pwnlib/tubes/buffer.py
--- a/pwnlib/tubes/buffer.py
+++ b/pwnlib/tubes/buffer.py
@@ -114,7 +114,7 @@
'goodbyeworld'
"""
if isinstance(data, Buffer):
- self.data = data + self.data
+ self.data = data.data + self.data
self.size += data.size
else:
self.data.insert(0, data)
| {"golden_diff": "diff --git a/pwnlib/tubes/buffer.py b/pwnlib/tubes/buffer.py\n--- a/pwnlib/tubes/buffer.py\n+++ b/pwnlib/tubes/buffer.py\n@@ -114,7 +114,7 @@\n 'goodbyeworld'\n \"\"\"\n if isinstance(data, Buffer):\n- self.data = data + self.data\n+ self.data = data.data + self.data\n self.size += data.size\n else:\n self.data.insert(0, data)\n", "issue": "A little bug in Buffer class\nThere is a litttle bug in pwnlib.tubes.Buffer class.The class method unget has a type error in line 117.add a buffer and a list\n\n```\nTraceback (most recent call last):\n File \"<input>\", line 1, in <module>\n a.unget(b)\n File \"buffer.py\", line 117, in unget\n self.data = data + self.data\nTypeError: unsupported operand type(s) for +: 'Buffer' and 'list'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n\nclass Buffer(Exception):\n \"\"\"\n List of strings with some helper routines.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"A\" * 10)\n >>> b.add(\"B\" * 10)\n >>> len(b)\n 20\n >>> b.get(1)\n 'A'\n >>> len(b)\n 19\n >>> b.get(9999)\n 'AAAAAAAAABBBBBBBBBB'\n >>> len(b)\n 0\n >>> b.get(1)\n ''\n\n Implementation Details:\n\n Implemented as a list. Strings are added onto the end.\n The ``0th`` item in the buffer is the oldest item, and\n will be received first.\n \"\"\"\n def __init__(self):\n self.data = [] # Buffer\n self.size = 0 # Length\n\n\n def __len__(self):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('lol')\n >>> len(b) == 3\n True\n >>> b.add('foobar')\n >>> len(b) == 9\n True\n \"\"\"\n return self.size\n\n def __nonzero__(self):\n return len(self) > 0\n\n def __contains__(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> 'x' in b\n False\n >>> b.add('x')\n >>> 'x' in b\n True\n \"\"\"\n for b in self.data:\n if x in b:\n return True\n return False\n\n def index(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> b.add('qwert')\n >>> b.index('t') == len(b) - 1\n True\n \"\"\"\n sofar = 0\n for b in self.data:\n if x in b:\n return sofar + b.index(x)\n sofar += len(b)\n raise IndexError()\n\n def add(self, data):\n \"\"\"\n Adds data to the buffer.\n\n Arguments:\n data(str,Buffer): Data to add\n \"\"\"\n # Fast path for ''\n if not data: return\n\n if isinstance(data, Buffer):\n self.size += data.size\n self.data += data.data\n else:\n self.size += len(data)\n self.data.append(data)\n\n def unget(self, data):\n \"\"\"\n Places data at the front of the buffer.\n\n Arguments:\n data(str,Buffer): Data to place at the beginning of the buffer.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"hello\")\n >>> b.add(\"world\")\n >>> b.get(5)\n 'hello'\n >>> b.unget(\"goodbye\")\n >>> b.get()\n 'goodbyeworld'\n \"\"\"\n if isinstance(data, Buffer):\n self.data = data + self.data\n self.size += data.size\n else:\n self.data.insert(0, data)\n self.size += len(data)\n\n def get(self, want=float('inf')):\n \"\"\"\n Retrieves bytes from the buffer.\n\n Arguments:\n want(int): Maximum number of bytes to fetch\n\n Returns:\n Data as string\n\n Example:\n\n >>> b = Buffer()\n >>> b.add('hello')\n >>> b.add('world')\n >>> b.get(1)\n 'h'\n >>> b.get()\n 'elloworld'\n \"\"\"\n # Fast path, get all of the data\n if want >= self.size:\n data = ''.join(self.data)\n self.size = 0\n self.data = []\n return data\n\n # Slow path, find the correct-index chunk\n have = 0\n i = 0\n while want >= have:\n have += len(self.data[i])\n i += 1\n\n # Join the chunks, evict from the buffer\n data = ''.join(self.data[:i])\n self.data = self.data[i:]\n\n # If the last chunk puts us over the 
limit,\n # stick the extra back at the beginning.\n if have > want:\n extra = data[want:]\n data = data[:want]\n self.data.insert(0, extra)\n\n # Size update\n self.size -= len(data)\n\n return data\n", "path": "pwnlib/tubes/buffer.py"}]} | 2,019 | 114 |
gh_patches_debug_11967 | rasdani/github-patches | git_diff | translate__pootle-6574 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configurable logo url and favicon
It would be nice to have a configurable logo in the config file, like you did for the name of the Pootle site (`POOTLE_TITLE`), instead of creating a custom template or editing the CSS of the default one.
</issue>
<code>
[start of pootle/apps/pootle_misc/context_processors.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.conf import settings
10
11 from pootle.core.markup import get_markup_filter_name
12 from pootle_project.models import Project
13 from staticpages.models import LegalPage
14
15
16 def _agreement_context(request):
17 """Returns whether the agreement box should be displayed or not."""
18 request_path = request.META['PATH_INFO']
19 nocheck = filter(lambda x: request_path.startswith(x),
20 settings.POOTLE_LEGALPAGE_NOCHECK_PREFIXES)
21
22 if (request.user.is_authenticated and not nocheck and
23 LegalPage.objects.has_pending_agreement(request.user)):
24 return True
25
26 return False
27
28
29 def _get_social_auth_providers(request):
30 if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
31 return []
32
33 from allauth.socialaccount import providers
34 return [{'name': provider.name, 'url': provider.get_login_url(request)}
35 for provider in providers.registry.get_list()]
36
37
38 def pootle_context(request):
39 """Exposes settings to templates."""
40 # FIXME: maybe we should expose relevant settings only?
41
42 return {
43 'settings': {
44 'POOTLE_CUSTOM_LOGO': getattr(settings, "POOTLE_CUSTOM_LOGO", ""),
45 'POOTLE_TITLE': settings.POOTLE_TITLE,
46 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
47 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and
48 settings.POOTLE_CONTACT_EMAIL),
49 'POOTLE_MARKUP_FILTER': get_markup_filter_name(),
50 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED,
51 'SCRIPT_NAME': settings.SCRIPT_NAME,
52 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,
53 'DEBUG': settings.DEBUG,
54 },
55 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT,
56 'ALL_PROJECTS': Project.objects.cached_dict(request.user),
57 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request),
58 'display_agreement': _agreement_context(request),
59 }
60
[end of pootle/apps/pootle_misc/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py
--- a/pootle/apps/pootle_misc/context_processors.py
+++ b/pootle/apps/pootle_misc/context_processors.py
@@ -43,6 +43,7 @@
'settings': {
'POOTLE_CUSTOM_LOGO': getattr(settings, "POOTLE_CUSTOM_LOGO", ""),
'POOTLE_TITLE': settings.POOTLE_TITLE,
+ 'POOTLE_FAVICONS_PATH': settings.POOTLE_FAVICONS_PATH,
'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and
settings.POOTLE_CONTACT_EMAIL),
| {"golden_diff": "diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py\n--- a/pootle/apps/pootle_misc/context_processors.py\n+++ b/pootle/apps/pootle_misc/context_processors.py\n@@ -43,6 +43,7 @@\n 'settings': {\n 'POOTLE_CUSTOM_LOGO': getattr(settings, \"POOTLE_CUSTOM_LOGO\", \"\"),\n 'POOTLE_TITLE': settings.POOTLE_TITLE,\n+ 'POOTLE_FAVICONS_PATH': settings.POOTLE_FAVICONS_PATH,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and\n settings.POOTLE_CONTACT_EMAIL),\n", "issue": "Configurable logo url and favicon\nIt would be nice to have a configurable logo in the config file like you did for the name of pootle site (`POOTLE_TITLE`) instead of creating a custom template or editing the css of the default one.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\n\nfrom pootle.core.markup import get_markup_filter_name\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.POOTLE_LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated and not nocheck and\n LegalPage.objects.has_pending_agreement(request.user)):\n return True\n\n return False\n\n\ndef _get_social_auth_providers(request):\n if 'allauth.socialaccount' not in settings.INSTALLED_APPS:\n return []\n\n from allauth.socialaccount import providers\n return [{'name': provider.name, 'url': provider.get_login_url(request)}\n for provider in providers.registry.get_list()]\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n # FIXME: maybe we should expose relevant settings only?\n\n return {\n 'settings': {\n 'POOTLE_CUSTOM_LOGO': getattr(settings, \"POOTLE_CUSTOM_LOGO\", \"\"),\n 'POOTLE_TITLE': settings.POOTLE_TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and\n settings.POOTLE_CONTACT_EMAIL),\n 'POOTLE_MARKUP_FILTER': get_markup_filter_name(),\n 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}]} | 1,211 | 170 |
gh_patches_debug_6623 | rasdani/github-patches | git_diff | netbox-community__netbox-14901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing event rule action type messes up Conditions field
### Deployment Type
Self-hosted
### NetBox Version
v3.7.1
### Python Version
3.11
### Steps to Reproduce
1. Go to **Operations - Event Rules - Add**
2. Set Conditions = `{ "and": [{"attr": "status.value", "value": "deprecated"}]}`
3. Select Action type = Script
### Expected Behavior
Conditions field stays as-is
### Observed Behavior
Conditions field is rewritten as invalid JSON:
`"{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}"`
The quoting is added with every change of the Action type.
This also happens if editing an existing event rule and changing the Action type.
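Editorial note (an assumption about the mechanism, not stated in the report): the behaviour is consistent with the field's already-serialized string being passed back through `prepare_value`, which `json.dumps` it a second time. A quick sketch of that double encoding:

```python
import json

value = '{ "and": [{"attr": "status.value", "value": "deprecated"}]}'
# Dumping a str produces a quoted, escaped JSON string literal:
print(json.dumps(value, sort_keys=True, indent=4))
# "{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}"
```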
### Workaround
- Copy the Conditions field somewhere
- Change the Action type
- Paste the Conditions field back
</issue>
<code>
[start of netbox/utilities/forms/fields/fields.py]
1 import json
2
3 from django import forms
4 from django.db.models import Count
5 from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
6 from django.templatetags.static import static
7 from django.utils.translation import gettext_lazy as _
8 from netaddr import AddrFormatError, EUI
9
10 from utilities.forms import widgets
11 from utilities.validators import EnhancedURLValidator
12
13 __all__ = (
14 'ColorField',
15 'CommentField',
16 'JSONField',
17 'LaxURLField',
18 'MACAddressField',
19 'SlugField',
20 'TagFilterField',
21 )
22
23
24 class CommentField(forms.CharField):
25 """
26 A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.
27 """
28 widget = widgets.MarkdownWidget
29 label = _('Comments')
30 help_text = _(
31 '<i class="mdi mdi-information-outline"></i> '
32 '<a href="{url}" target="_blank" tabindex="-1">Markdown</a> syntax is supported'
33 ).format(url=static('docs/reference/markdown/'))
34
35 def __init__(self, *, label=label, help_text=help_text, required=False, **kwargs):
36 super().__init__(label=label, help_text=help_text, required=required, **kwargs)
37
38
39 class SlugField(forms.SlugField):
40 """
41 Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.
42
43 Parameters:
44 slug_source: Name of the form field from which the slug value will be derived
45 """
46 widget = widgets.SlugWidget
47 label = _('Slug')
48 help_text = _("URL-friendly unique shorthand")
49
50 def __init__(self, *, slug_source='name', label=label, help_text=help_text, **kwargs):
51 super().__init__(label=label, help_text=help_text, **kwargs)
52
53 self.widget.attrs['slug-source'] = slug_source
54
55
56 class ColorField(forms.CharField):
57 """
58 A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to
59 render choices.
60 """
61 widget = widgets.ColorSelect
62
63
64 class TagFilterField(forms.MultipleChoiceField):
65 """
66 A filter field for the tags of a model. Only the tags used by a model are displayed.
67
68 :param model: The model of the filter
69 """
70
71 def __init__(self, model, *args, **kwargs):
72 def get_choices():
73 tags = model.tags.annotate(
74 count=Count('extras_taggeditem_items')
75 ).order_by('name')
76 return [
77 (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags
78 ]
79
80 # Choices are fetched each time the form is initialized
81 super().__init__(label=_('Tags'), choices=get_choices, required=False, *args, **kwargs)
82
83
84 class LaxURLField(forms.URLField):
85 """
86 Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names
87 (e.g. http://myserver/ is valid)
88 """
89 default_validators = [EnhancedURLValidator()]
90
91
92 class JSONField(_JSONField):
93 """
94 Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text.
95 """
96 def __init__(self, *args, **kwargs):
97 super().__init__(*args, **kwargs)
98 if not self.help_text:
99 self.help_text = _('Enter context data in <a href="https://json.org/">JSON</a> format.')
100 self.widget.attrs['placeholder'] = ''
101 self.widget.attrs['class'] = 'font-monospace'
102
103 def prepare_value(self, value):
104 if isinstance(value, InvalidJSONInput):
105 return value
106 if value in ('', None):
107 return ''
108 return json.dumps(value, sort_keys=True, indent=4)
109
110
111 class MACAddressField(forms.Field):
112 """
113 Validates a 48-bit MAC address.
114 """
115 widget = forms.CharField
116 default_error_messages = {
117 'invalid': _('MAC address must be in EUI-48 format'),
118 }
119
120 def to_python(self, value):
121 value = super().to_python(value)
122
123 # Validate MAC address format
124 try:
125 value = EUI(value.strip())
126 except AddrFormatError:
127 raise forms.ValidationError(self.error_messages['invalid'], code='invalid')
128
129 return value
130
[end of netbox/utilities/forms/fields/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py
--- a/netbox/utilities/forms/fields/fields.py
+++ b/netbox/utilities/forms/fields/fields.py
@@ -105,7 +105,12 @@
return value
if value in ('', None):
return ''
- return json.dumps(value, sort_keys=True, indent=4)
+ if type(value) is str:
+ try:
+ value = json.loads(value, cls=self.decoder)
+ except json.decoder.JSONDecodeError:
+ return value
+ return json.dumps(value, sort_keys=True, indent=4, ensure_ascii=False, cls=self.encoder)
class MACAddressField(forms.Field):
| {"golden_diff": "diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py\n--- a/netbox/utilities/forms/fields/fields.py\n+++ b/netbox/utilities/forms/fields/fields.py\n@@ -105,7 +105,12 @@\n return value\n if value in ('', None):\n return ''\n- return json.dumps(value, sort_keys=True, indent=4)\n+ if type(value) is str:\n+ try:\n+ value = json.loads(value, cls=self.decoder)\n+ except json.decoder.JSONDecodeError:\n+ return value\n+ return json.dumps(value, sort_keys=True, indent=4, ensure_ascii=False, cls=self.encoder)\n \n \n class MACAddressField(forms.Field):\n", "issue": "Changing event rule action type messes up Conditions field\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.1\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to **Operations - Event Rules - Add**\r\n2. Set Conditions = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}`\r\n3. Select Action type = Script\r\n\r\n### Expected Behavior\r\n\r\nConditions field stays as-is\r\n\r\n### Observed Behavior\r\n\r\nConditions field is rewritten as invalid JSON:\r\n\r\n`\"{ \\\"and\\\": [{\\\"attr\\\": \\\"status.value\\\", \\\"value\\\": \\\"deprecated\\\"}]}\"`\r\n\r\nThe quoting is added with every change of the Action type.\r\n\r\nThis also happens if editing an existing event rule and changing the Action type.\r\n\r\n### Workaround\r\n\r\n- Copy the Conditions field somewhere\r\n- Change the Action type\r\n- Paste the Conditions field back\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom django.templatetags.static import static\nfrom django.utils.translation import gettext_lazy as _\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = widgets.MarkdownWidget\n label = _('Comments')\n help_text = _(\n '<i class=\"mdi mdi-information-outline\"></i> '\n '<a href=\"{url}\" target=\"_blank\" tabindex=\"-1\">Markdown</a> syntax is supported'\n ).format(url=static('docs/reference/markdown/'))\n\n def __init__(self, *, label=label, help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n label = _('Slug')\n help_text = _(\"URL-friendly unique shorthand\")\n\n def __init__(self, *, slug_source='name', label=label, help_text=help_text, **kwargs):\n super().__init__(label=label, help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. 
Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label=_('Tags'), choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = _('Enter context data in <a href=\"https://json.org/\">JSON</a> format.')\n self.widget.attrs['placeholder'] = ''\n self.widget.attrs['class'] = 'font-monospace'\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value in ('', None):\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': _('MAC address must be in EUI-48 format'),\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n", "path": "netbox/utilities/forms/fields/fields.py"}]} | 1,967 | 168 |
gh_patches_debug_23338 | rasdani/github-patches | git_diff | python-pillow__Pillow-3950 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageGrab fails with multiple monitors
When calling ImageGrab.grab() passing in a bounding box that is outside the area of my primary monitor, I just get black.
For example, my primary monitor is 1920x1200, flanked on either side by monitors running at 1600x1200, making my total desktop size 5120x1200. Also, because my primary monitor is in the middle, the horizontal coordinates for the full virtual desktop go from -1600 to 3519, where 0 is the left-most pixel of my primary monitor. If I try to capture my rightmost monitor using the following code, all I get is a black image:
``` Python
from PIL import ImageGrab
img = ImageGrab.grab([1920, 0, 3519, 1199])
img.save("test.jpg")
```
Poking around the code, it looks like `ImageGrab.grab()` calls into `Image.core.grabscreen` which is an alias for [`PyImaging_GrabScreenWin32()`](https://github.com/python-pillow/Pillow/blob/2be12dec2b231d31400f44bfa855966484997c16/display.c#L323) in `display.c`. That function does retrieve a DC handle to the entire desktop, but the subsequent calls to `GetDeviceCaps` with `HORZRES` and `VERTRES` only return the x/y size of the primary monitor, not the entire desktop.
``` C
screen = CreateDC("DISPLAY", NULL, NULL, NULL);
// ...
width = GetDeviceCaps(screen, HORZRES);
height = GetDeviceCaps(screen, VERTRES);
// ...
if (!BitBlt(screen_copy, 0, 0, width, height, screen, 0, 0, SRCCOPY))
goto error;
```
Another problem with the above code is that monitors to the left of or above the primary display have negative coordinates in the `screen` DC. So, for example, capturing the monitor to the left of my primary display (which has a resolution of 1600x1200) would need to call BitBlt with the following coordinates:
``` C
left = -1600
top = 0
width = 1600
height = 1200
BitBlt(screen_copy, 0, 0, width, height, screen, left, top, SRCCOPY)
```
Similarly, if I was trying to capture a monitor above my primary display, then `top` would be negative. Because of the negative coordinates issue, I don't see any way of fixing this without passing in `left, top, width, height` from the calling python code, which could be calculated easily from the `bbox` parameter. Then it's simply up to the caller to know the coordinates of the monitor they want to capture. If no `bbox` is provided, then the coordinates would default to the primary display (0, 0, HORZRES, VERTRES), keeping the current functionality unchanged so as not to break existing code that uses `ImageGrab.grab()`.
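A minimal sketch (editorial addition, not from the original report) of how the Python-side wrapper could derive those values from `bbox`, assuming the C grabber were extended to accept an offset:

```python
def _bbox_to_blt_args(bbox):
    # bbox is (left, top, right, bottom) in virtual-desktop coordinates,
    # which may be negative for monitors left of / above the primary one.
    left, top, right, bottom = bbox
    return left, top, right - left, bottom - top

# The rightmost monitor from the report:
print(_bbox_to_blt_args((1920, 0, 3519, 1199)))  # (1920, 0, 1599, 1199)
```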
</issue>
<code>
[start of src/PIL/ImageGrab.py]
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (macOS and Windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 import sys
19
20 from . import Image
21
22 if sys.platform == "win32":
23 grabber = Image.core.grabscreen
24 elif sys.platform == "darwin":
25 import os
26 import tempfile
27 import subprocess
28 else:
29 raise ImportError("ImageGrab is macOS and Windows only")
30
31
32 def grab(bbox=None, include_layered_windows=False):
33 if sys.platform == "darwin":
34 fh, filepath = tempfile.mkstemp(".png")
35 os.close(fh)
36 subprocess.call(["screencapture", "-x", filepath])
37 im = Image.open(filepath)
38 im.load()
39 os.unlink(filepath)
40 else:
41 size, data = grabber(include_layered_windows)
42 im = Image.frombytes(
43 "RGB",
44 size,
45 data,
46 # RGB, 32-bit line padding, origin lower left corner
47 "raw",
48 "BGR",
49 (size[0] * 3 + 3) & -4,
50 -1,
51 )
52 if bbox:
53 im = im.crop(bbox)
54 return im
55
56
57 def grabclipboard():
58 if sys.platform == "darwin":
59 fh, filepath = tempfile.mkstemp(".jpg")
60 os.close(fh)
61 commands = [
62 'set theFile to (open for access POSIX file "'
63 + filepath
64 + '" with write permission)',
65 "try",
66 " write (the clipboard as JPEG picture) to theFile",
67 "end try",
68 "close access theFile",
69 ]
70 script = ["osascript"]
71 for command in commands:
72 script += ["-e", command]
73 subprocess.call(script)
74
75 im = None
76 if os.stat(filepath).st_size != 0:
77 im = Image.open(filepath)
78 im.load()
79 os.unlink(filepath)
80 return im
81 else:
82 data = Image.core.grabclipboard()
83 if isinstance(data, bytes):
84 from . import BmpImagePlugin
85 import io
86
87 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
88 return data
89
[end of src/PIL/ImageGrab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/PIL/ImageGrab.py b/src/PIL/ImageGrab.py
--- a/src/PIL/ImageGrab.py
+++ b/src/PIL/ImageGrab.py
@@ -29,7 +29,7 @@
raise ImportError("ImageGrab is macOS and Windows only")
-def grab(bbox=None, include_layered_windows=False):
+def grab(bbox=None, include_layered_windows=False, all_screens=False):
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
@@ -37,8 +37,10 @@
im = Image.open(filepath)
im.load()
os.unlink(filepath)
+ if bbox:
+ im = im.crop(bbox)
else:
- size, data = grabber(include_layered_windows)
+ offset, size, data = grabber(include_layered_windows, all_screens)
im = Image.frombytes(
"RGB",
size,
@@ -49,8 +51,10 @@
(size[0] * 3 + 3) & -4,
-1,
)
- if bbox:
- im = im.crop(bbox)
+ if bbox:
+ x0, y0 = offset
+ left, top, right, bottom = bbox
+ im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
return im
| {"golden_diff": "diff --git a/src/PIL/ImageGrab.py b/src/PIL/ImageGrab.py\n--- a/src/PIL/ImageGrab.py\n+++ b/src/PIL/ImageGrab.py\n@@ -29,7 +29,7 @@\n raise ImportError(\"ImageGrab is macOS and Windows only\")\n \n \n-def grab(bbox=None, include_layered_windows=False):\n+def grab(bbox=None, include_layered_windows=False, all_screens=False):\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n@@ -37,8 +37,10 @@\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n+ if bbox:\n+ im = im.crop(bbox)\n else:\n- size, data = grabber(include_layered_windows)\n+ offset, size, data = grabber(include_layered_windows, all_screens)\n im = Image.frombytes(\n \"RGB\",\n size,\n@@ -49,8 +51,10 @@\n (size[0] * 3 + 3) & -4,\n -1,\n )\n- if bbox:\n- im = im.crop(bbox)\n+ if bbox:\n+ x0, y0 = offset\n+ left, top, right, bottom = bbox\n+ im = im.crop((left - x0, top - y0, right - x0, bottom - y0))\n return im\n", "issue": "ImageGrab fails with multiple monitors\nWhen calling ImageGrab.grab() passing in a bounding box that is outside the area of my primary monitor, I just get black.\n\nFor example, my primary monitor is 1920x1200, flanked on either side by monitors running at 1600x1200, making my total desktop size 5120x1200. Also, because my primary monitor is in the middle, the horizontal coordinates for the full virtual desktop go from -1600 to 3519, where 0 is the left-most pixel of my primary monitor. If I try to capture my rightmost monitor using the following code, all I get is a black image:\n\n``` Python\nfrom PIL import ImageGrab\nimg = ImageGrab.grab([1920, 0, 3519, 1199])\nimg.save(\"test.jpg\")\n```\n\nPoking around the code, it looks like `ImageGrab.grab()` calls into `Image.core.grabscreen` which is an alias for [`PyImaging_GrabScreenWin32()`](https://github.com/python-pillow/Pillow/blob/2be12dec2b231d31400f44bfa855966484997c16/display.c#L323) in `display.c`. That function does retrieve a DC handle to the entire desktop, but the subsequent calls to `GetDeviceCaps` with `HORZRES` and `VERTRES` only return the x/y size of the primary monitor, not the entire desktop.\n\n``` C\nscreen = CreateDC(\"DISPLAY\", NULL, NULL, NULL);\n// ...\nwidth = GetDeviceCaps(screen, HORZRES);\nheight = GetDeviceCaps(screen, VERTRES);\n// ...\nif (!BitBlt(screen_copy, 0, 0, width, height, screen, 0, 0, SRCCOPY))\n goto error;\n```\n\nAnother problem with the above code is that monitors to the left of or above the primary display have negative coordinates in the `screen` DC. So, for example, capturing the monitor to the left of my primary display (which has a resolution of 1600x1200) would need to call BitBlt with the following coordinates:\n\n``` C\nleft = -1600\ntop = 0\nwidth = 1600\nheight = 1200\nBitBlt(screen_copy, 0, 0, width, height, screen, left, top, SRCCOPY)\n```\n\nSimilarly, if I was trying to capture a monitor above my primary display, then `top` would be negative. Because of the negative coordinates issue, I don't see any way of fixing this without passing in `left, top, width, height` from the calling python code, which could be calculated easily from the `bbox` parameter. Then it's simply up to the caller to know the coordinates of the monitor they want to capture. 
If no `bbox` is provided, then the coordinates would default to the primary display (0, 0, HORZRES, VERTRES), keeping the current functionality unchanged so as not to break existing code that uses `ImageGrab.grab()`.\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (macOS and Windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\n\nfrom . import Image\n\nif sys.platform == \"win32\":\n grabber = Image.core.grabscreen\nelif sys.platform == \"darwin\":\n import os\n import tempfile\n import subprocess\nelse:\n raise ImportError(\"ImageGrab is macOS and Windows only\")\n\n\ndef grab(bbox=None, include_layered_windows=False):\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n subprocess.call([\"screencapture\", \"-x\", filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n else:\n size, data = grabber(include_layered_windows)\n im = Image.frombytes(\n \"RGB\",\n size,\n data,\n # RGB, 32-bit line padding, origin lower left corner\n \"raw\",\n \"BGR\",\n (size[0] * 3 + 3) & -4,\n -1,\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".jpg\")\n os.close(fh)\n commands = [\n 'set theFile to (open for access POSIX file \"'\n + filepath\n + '\" with write permission)',\n \"try\",\n \" write (the clipboard as JPEG picture) to theFile\",\n \"end try\",\n \"close access theFile\",\n ]\n script = [\"osascript\"]\n for command in commands:\n script += [\"-e\", command]\n subprocess.call(script)\n\n im = None\n if os.stat(filepath).st_size != 0:\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n return im\n else:\n data = Image.core.grabclipboard()\n if isinstance(data, bytes):\n from . import BmpImagePlugin\n import io\n\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "src/PIL/ImageGrab.py"}]} | 1,984 | 313 |
gh_patches_debug_20985 | rasdani/github-patches | git_diff | facebookresearch__fairscale-295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] pip package 0.1.3 fails to install
Both `python3.8 -m pip install fairscale --verbose` and `python3.8 -m pip install fairscale --no-build-isolation --verbose` trigger
```
ninja: error: '/tmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp', needed by '/t
mp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/build/temp.linux-x86_64-3.8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.o', missin
g and no known rule to make it
```
Cloning the repository and running `python3.8 -m pip install . --verbose` from within the directory works fine.
</issue>
<code>
[start of fairscale/__init__.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 __version__ = "0.1.3"
7
8 ################################################################################
9 # Import most common subpackages
10 ################################################################################
11
12 from . import nn
13
[end of fairscale/__init__.py]
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
5 import os
6 import re
7 import warnings
8
9 import setuptools
10 import torch
11 from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
12
13 this_dir = os.path.dirname(os.path.abspath(__file__))
14
15
16 def fetch_requirements():
17 with open("requirements.txt") as f:
18 reqs = f.read().strip().split("\n")
19 return reqs
20
21
22 # https://packaging.python.org/guides/single-sourcing-package-version/
23 def find_version(version_file_path):
24 with open(version_file_path) as version_file:
25 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M)
26 if version_match:
27 return version_match.group(1)
28 raise RuntimeError("Unable to find version string.")
29
30
31 extensions = []
32 cmdclass = {}
33
34 force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
35 if (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:
36 extensions.extend(
37 [
38 CUDAExtension(
39 name="fairscale.fused_adam_cuda",
40 include_dirs=[os.path.join(this_dir, "fairscale/clib/fused_adam_cuda")],
41 sources=[
42 "fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp",
43 "fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu",
44 ],
45 extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]},
46 )
47 ]
48 )
49
50 cmdclass["build_ext"] = BuildExtension
51 else:
52 warnings.warn("Cannot install FusedAdam cuda.")
53
54
55 if __name__ == "__main__":
56 setuptools.setup(
57 name="fairscale",
58 description="fairscale: A PyTorch library for large-scale and high-performance training.",
59 version=find_version("fairscale/__init__.py"),
60 install_requires=fetch_requirements(),
61 include_package_data=True,
62 packages=setuptools.find_packages(exclude=("tests", "tests.*")),
63 ext_modules=extensions,
64 cmdclass=cmdclass,
65 python_requires=">=3.6",
66 author="Facebook AI Research",
67 author_email="[email protected]",
68 classifiers=[
69 "Programming Language :: Python :: 3.6",
70 "Programming Language :: Python :: 3.7",
71 "Programming Language :: Python :: 3.8",
72 "License :: OSI Approved :: BSD License",
73 "Topic :: Scientific/Engineering :: Artificial Intelligence",
74 "Operating System :: OS Independent",
75 ],
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairscale/__init__.py b/fairscale/__init__.py
--- a/fairscale/__init__.py
+++ b/fairscale/__init__.py
@@ -3,7 +3,7 @@
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-__version__ = "0.1.3"
+__version__ = "0.1.4"
################################################################################
# Import most common subpackages
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,6 +65,8 @@
python_requires=">=3.6",
author="Facebook AI Research",
author_email="[email protected]",
+ long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.",
+ long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
| {"golden_diff": "diff --git a/fairscale/__init__.py b/fairscale/__init__.py\n--- a/fairscale/__init__.py\n+++ b/fairscale/__init__.py\n@@ -3,7 +3,7 @@\n # This source code is licensed under the BSD license found in the\n # LICENSE file in the root directory of this source tree.\n \n-__version__ = \"0.1.3\"\n+__version__ = \"0.1.4\"\n \n ################################################################################\n # Import most common subpackages\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,6 +65,8 @@\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n+ long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.\",\n+ long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n", "issue": "[bug] pip package 0.1.3 fails to install\nBoth `python3.8 -m pip install fairscale --verbose` and `python3.8 -m pip install fairscale --no-build-isolation --verbose` trigger\r\n\r\n```\r\nninja: error: '/tmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp', needed by '/t\r\nmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/build/temp.linux-x86_64-3.8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.o', missin\r\ng and no known rule to make it\r\n```\r\n\r\nCloning the repository and running `python3.8 -m pip install . --verbose` from within the directory works fine.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = \"0.1.3\"\n\n################################################################################\n# Import most common subpackages\n################################################################################\n\nfrom . import nn\n", "path": "fairscale/__init__.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport os\nimport re\nimport warnings\n\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef fetch_requirements():\n with open(\"requirements.txt\") as f:\n reqs = f.read().strip().split(\"\\n\")\n return reqs\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\ndef find_version(version_file_path):\n with open(version_file_path) as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nextensions = []\ncmdclass = {}\n\nforce_cuda = os.getenv(\"FORCE_CUDA\", \"0\") == \"1\"\nif (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:\n extensions.extend(\n [\n CUDAExtension(\n name=\"fairscale.fused_adam_cuda\",\n include_dirs=[os.path.join(this_dir, \"fairscale/clib/fused_adam_cuda\")],\n sources=[\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp\",\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu\",\n ],\n extra_compile_args={\"cxx\": [\"-O3\"], \"nvcc\": [\"-O3\", \"--use_fast_math\"]},\n )\n ]\n )\n\n cmdclass[\"build_ext\"] = BuildExtension\nelse:\n warnings.warn(\"Cannot install FusedAdam cuda.\")\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"fairscale\",\n description=\"fairscale: A PyTorch library for large-scale and high-performance training.\",\n version=find_version(\"fairscale/__init__.py\"),\n install_requires=fetch_requirements(),\n include_package_data=True,\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Operating System :: OS Independent\",\n ],\n )\n", "path": "setup.py"}]} | 1,600 | 250 |
gh_patches_debug_26947 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-4194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Detecting and alerting of duplication keys/components/entries in YAML file
### Is your feature request related to a problem? Please describe
In release 1.3.11 it was found that a PR updating the [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) had duplicated component names.
This wastes CI resources by rebuilding the duplicated components.
### Describe the solution you'd like
We want a check that detects duplicate entries based on keys/components/names and fails the GitHub check.
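A short sketch (editorial addition; the component names are illustrative) of the kind of duplicate detection the check needs, using `collections.Counter` as the existing CI code already does:

```python
from collections import Counter

def find_duplicates(names):
    """Return the names that occur more than once in a manifest."""
    return [name for name, count in Counter(names).items() if count > 1]

print(find_duplicates(["common-utils", "job-scheduler", "sql", "sql"]))  # ['sql']
```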
### Describe alternatives you've considered
Manually check for duplicate values
### Acceptance Criteria
* The manifest check should fail at the CI level for components with duplicate components.name values in opensearch and opensearch-dashboards manifests as well as test manifests. See [what manifests are](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests).
</issue>
<code>
[start of src/ci_workflow/ci_manifests.py]
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8
9 import re
10 from collections import Counter
11 from io import TextIOWrapper
12 from typing import Type, Union
13
14 import yaml
15
16 from ci_workflow.ci_args import CiArgs
17 from ci_workflow.ci_input_manifest import CiInputManifest
18 from ci_workflow.ci_test_manifest import CiTestManifest
19
20
21 class CiManifests:
22 @staticmethod
23 def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:
24 if re.search("-test.yml$", filename):
25 return CiTestManifest
26 else:
27 return CiInputManifest
28
29 @staticmethod
30 def __get_duplicate_component_names(count_component_names: Counter) -> list:
31 duplicate_component_names = []
32 for component_name, count in count_component_names.items():
33 if count > 1:
34 duplicate_component_names.append(component_name)
35 return duplicate_component_names
36
37 @staticmethod
38 def __check_duplicate_component_names(file: TextIOWrapper) -> None:
39 yaml_dict = yaml.safe_load(file)
40 component_names = []
41 for component in yaml_dict['components']:
42 component_names.append(component['name'])
43 count_component_names = Counter(component_names)
44
45 if set(count_component_names.values()) != set([1]):
46 duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)
47 duplicate_component_names_string = ', '.join(duplicate_component_names)
48 raise ValueError(f"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. ")
49 file.seek(0)
50
51 @classmethod
52 def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:
53 cls.__check_duplicate_component_names(file)
54 return cls.__klass(file.name)(file, args)
55
[end of src/ci_workflow/ci_manifests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py
--- a/src/ci_workflow/ci_manifests.py
+++ b/src/ci_workflow/ci_manifests.py
@@ -26,25 +26,16 @@
else:
return CiInputManifest
- @staticmethod
- def __get_duplicate_component_names(count_component_names: Counter) -> list:
- duplicate_component_names = []
- for component_name, count in count_component_names.items():
- if count > 1:
- duplicate_component_names.append(component_name)
- return duplicate_component_names
-
@staticmethod
def __check_duplicate_component_names(file: TextIOWrapper) -> None:
yaml_dict = yaml.safe_load(file)
component_names = []
for component in yaml_dict['components']:
component_names.append(component['name'])
- count_component_names = Counter(component_names)
- if set(count_component_names.values()) != set([1]):
- duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)
- duplicate_component_names_string = ', '.join(duplicate_component_names)
+ duplicate_component_names = [comp for comp, count in Counter(component_names).items() if count > 1]
+ duplicate_component_names_string = ', '.join(duplicate_component_names)
+ if len(duplicate_component_names) > 0:
raise ValueError(f"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. ")
file.seek(0)
| {"golden_diff": "diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py\n--- a/src/ci_workflow/ci_manifests.py\n+++ b/src/ci_workflow/ci_manifests.py\n@@ -26,25 +26,16 @@\n else:\n return CiInputManifest\n \n- @staticmethod\n- def __get_duplicate_component_names(count_component_names: Counter) -> list:\n- duplicate_component_names = []\n- for component_name, count in count_component_names.items():\n- if count > 1:\n- duplicate_component_names.append(component_name)\n- return duplicate_component_names\n-\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n- count_component_names = Counter(component_names)\n \n- if set(count_component_names.values()) != set([1]):\n- duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n- duplicate_component_names_string = ', '.join(duplicate_component_names)\n+ duplicate_component_names = [comp for comp, count in Counter(component_names).items() if count > 1]\n+ duplicate_component_names_string = ', '.join(duplicate_component_names)\n+ if len(duplicate_component_names) > 0:\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n file.seek(0)\n", "issue": "Detecting and alerting of duplication keys/components/entries in YAML file\n### Is your feature request related to a problem? Please describe\r\n\r\nit was found in release 1.3.11 , a PR to update [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) has duplicated components name.\r\nIt would cause the resource wasted on CI to rebuild the duplicated components \r\n\r\n### Describe the solution you'd like\r\n\r\nWe want to have a check to detect if there is any duplication entries based on keys/components/names and probably fail the GitHub check\r\n\r\n### Describe alternatives you've considered\r\n\r\nManually check for duplicate values\r\n\r\n### Acceptance Criteria\r\n* The manifest check should fail at CI level for components with duplicate components.name values in opensearch and opensearch-dashboard as well as test manifests. 
See what are [manifests](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests)\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\n\nimport re\nfrom collections import Counter\nfrom io import TextIOWrapper\nfrom typing import Type, Union\n\nimport yaml\n\nfrom ci_workflow.ci_args import CiArgs\nfrom ci_workflow.ci_input_manifest import CiInputManifest\nfrom ci_workflow.ci_test_manifest import CiTestManifest\n\n\nclass CiManifests:\n @staticmethod\n def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:\n if re.search(\"-test.yml$\", filename):\n return CiTestManifest\n else:\n return CiInputManifest\n\n @staticmethod\n def __get_duplicate_component_names(count_component_names: Counter) -> list:\n duplicate_component_names = []\n for component_name, count in count_component_names.items():\n if count > 1:\n duplicate_component_names.append(component_name)\n return duplicate_component_names\n\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n count_component_names = Counter(component_names)\n\n if set(count_component_names.values()) != set([1]):\n duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n duplicate_component_names_string = ', '.join(duplicate_component_names)\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n file.seek(0)\n\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n cls.__check_duplicate_component_names(file)\n return cls.__klass(file.name)(file, args)\n", "path": "src/ci_workflow/ci_manifests.py"}]} | 1,299 | 333 |
gh_patches_debug_62586 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
locale.Error: unsupported locale setting exception when glibc locale files are not present
**Information**
- Solaar version: 1.0.3
- Distribution: Fedora
- Kernel version (ex. `uname -srmo`): `Linux 5.7.11-200.fc32.x86_64 x86_64 GNU/Linux`
- Output of `solaar show`: N/A
**Describe the bug**
Any solaar invocation is failing with a traceback when locale.setlocale() call fails, e.g. due to missing glibc locale files for the currently set locale.
**To Reproduce**
Steps to reproduce the behavior:
```
$ sudo dnf remove glibc-langpack-de
$ export LC_ALL=de_CH.UTF-8
$ export LANG=de_CH.UTF-8
$ solaar --help
Traceback (most recent call last):
File "/usr/bin/solaar", line 59, in <module>
import solaar.gtk
File "/usr/lib/python3.8/site-packages/solaar/gtk.py", line 29, in <module>
import solaar.i18n as _i18n
File "/usr/lib/python3.8/site-packages/solaar/i18n.py", line 50, in <module>
locale.setlocale(locale.LC_ALL, '')
File "/usr/lib64/python3.8/locale.py", line 608, in setlocale
return _setlocale(category, locale)
locale.Error: unsupported locale setting
$
```
**Additional context**
Looks like #190 is still unfixed. Downstream bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1811313 .
</issue>
<code>
[start of lib/solaar/i18n.py]
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22 import gettext as _gettext
23 import locale
24
25 from solaar import NAME as _NAME
26
27 #
28 #
29 #
30
31
32 def _find_locale_path(lc_domain):
33 import os.path as _path
34
35 import sys as _sys
36 prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))
37 src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))
38 del _sys
39
40 from glob import glob as _glob
41
42 for location in prefix_share, src_share:
43 mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))
44 if mo_files:
45 return _path.join(location, 'locale')
46
47 # del _path
48
49
50 locale.setlocale(locale.LC_ALL, '')
51 language, encoding = locale.getlocale()
52 del locale
53
54 _LOCALE_DOMAIN = _NAME.lower()
55 path = _find_locale_path(_LOCALE_DOMAIN)
56
57 _gettext.bindtextdomain(_LOCALE_DOMAIN, path)
58 _gettext.textdomain(_LOCALE_DOMAIN)
59 _gettext.install(_LOCALE_DOMAIN)
60
61 try:
62 unicode # noqa: F821
63 _ = lambda x: _gettext.gettext(x).decode('UTF-8')
64 ngettext = lambda *x: _gettext.ngettext(*x).decode('UTF-8')
65 except Exception:
66 _ = _gettext.gettext
67 ngettext = _gettext.ngettext
68
[end of lib/solaar/i18n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/solaar/i18n.py b/lib/solaar/i18n.py
--- a/lib/solaar/i18n.py
+++ b/lib/solaar/i18n.py
@@ -47,7 +47,11 @@
# del _path
-locale.setlocale(locale.LC_ALL, '')
+try:
+ locale.setlocale(locale.LC_ALL, '')
+except Exception:
+ pass
+
language, encoding = locale.getlocale()
del locale
| {"golden_diff": "diff --git a/lib/solaar/i18n.py b/lib/solaar/i18n.py\n--- a/lib/solaar/i18n.py\n+++ b/lib/solaar/i18n.py\n@@ -47,7 +47,11 @@\n # del _path\n \n \n-locale.setlocale(locale.LC_ALL, '')\n+try:\n+ locale.setlocale(locale.LC_ALL, '')\n+except Exception:\n+ pass\n+\n language, encoding = locale.getlocale()\n del locale\n", "issue": "locale.Error: unsupported locale setting exception when glibc locale files are not present\n**Information**\r\n- Solaar version: 1.0.3\r\n- Distribution: Fedora\r\n- Kernel version (ex. `uname -srmo`): `Linux 5.7.11-200.fc32.x86_64 x86_64 GNU/Linux`\r\n- Output of `solaar show`: N/A\r\n\r\n**Describe the bug**\r\nAny solaar invocation is failing with a traceback when locale.setlocale() call fails, e.g. due to missing glibc locale files for the currently set locale.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\n$ sudo dnf remove glibc-langpack-de\r\n$ export LC_ALL=de_CH.UTF-8\r\n$ export LANG=de_CH.UTF-8\r\n$ solaar --help\r\nTraceback (most recent call last):\r\n File \"/usr/bin/solaar\", line 59, in <module>\r\n import solaar.gtk\r\n File \"/usr/lib/python3.8/site-packages/solaar/gtk.py\", line 29, in <module>\r\n import solaar.i18n as _i18n\r\n File \"/usr/lib/python3.8/site-packages/solaar/i18n.py\", line 50, in <module>\r\n locale.setlocale(locale.LC_ALL, '')\r\n File \"/usr/lib64/python3.8/locale.py\", line 608, in setlocale\r\n return _setlocale(category, locale)\r\nlocale.Error: unsupported locale setting\r\n$ \r\n```\r\n\r\n**Additional context**\r\nLooks like #190 is still unfixed. Downstream bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1811313 .\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport gettext as _gettext\nimport locale\n\nfrom solaar import NAME as _NAME\n\n#\n#\n#\n\n\ndef _find_locale_path(lc_domain):\n import os.path as _path\n\n import sys as _sys\n prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))\n src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))\n del _sys\n\n from glob import glob as _glob\n\n for location in prefix_share, src_share:\n mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))\n if mo_files:\n return _path.join(location, 'locale')\n\n # del _path\n\n\nlocale.setlocale(locale.LC_ALL, '')\nlanguage, encoding = locale.getlocale()\ndel locale\n\n_LOCALE_DOMAIN = _NAME.lower()\npath = _find_locale_path(_LOCALE_DOMAIN)\n\n_gettext.bindtextdomain(_LOCALE_DOMAIN, path)\n_gettext.textdomain(_LOCALE_DOMAIN)\n_gettext.install(_LOCALE_DOMAIN)\n\ntry:\n unicode # noqa: F821\n _ = lambda x: _gettext.gettext(x).decode('UTF-8')\n ngettext = lambda *x: _gettext.ngettext(*x).decode('UTF-8')\nexcept Exception:\n _ = _gettext.gettext\n ngettext = _gettext.ngettext\n", "path": "lib/solaar/i18n.py"}]} | 1,584 | 113 |
gh_patches_debug_32615 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve documentation of view decorators
Explain what our view decorators are doing and add docstrings of the following format:
```
"""
[Summary]
:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]
:type [ParamName]: [ParamType](, optional)
...
:raises [ErrorType]: [ErrorDescription]
...
:return: [ReturnDescription]
:rtype: [ReturnType]
"""
```
</issue>
<code>
[start of src/cms/decorators.py]
1 import time
2 from functools import wraps
3
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import redirect
6
7 from .models import Region
8
9
10 def staff_required(function):
11 @wraps(function)
12 def wrap(request, *args, **kwargs):
13 user = request.user
14 # superusers and staff have access to this areas
15 if user.is_superuser or user.is_staff:
16 return function(request, *args, **kwargs)
17 raise PermissionDenied
18 return wrap
19
20
21 def region_permission_required(function):
22 @wraps(function)
23 def wrap(request, *args, **kwargs):
24 user = request.user
25 # superusers and staff have permissions for all regions
26 if user.is_superuser or user.is_staff:
27 return function(request, *args, **kwargs)
28 region = Region.get_current_region(request)
29 if region in user.profile.regions.all():
30 return function(request, *args, **kwargs)
31 raise PermissionDenied
32 return wrap
33
34 def modify_mfa_authenticated(function):
35 @wraps(function)
36 def wrap(request, *args, **kwargs):
37 if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):
38 request.session['mfa_redirect_url'] = request.path
39 return redirect('user_settings_auth_modify_mfa')
40 return function(request, *args, **kwargs)
41 return wrap
42
[end of src/cms/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/decorators.py b/src/cms/decorators.py
--- a/src/cms/decorators.py
+++ b/src/cms/decorators.py
@@ -1,3 +1,8 @@
+"""
+Django view decorators can be used to restrict the execution of a view function on certain conditions.
+
+For more information, see :doc:`topics/http/decorators`.
+"""
import time
from functools import wraps
@@ -8,6 +13,15 @@
def staff_required(function):
+ """
+ This decorator can be used to make sure a view can only be retrieved by users who are either staff or superusers.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
user = request.user
@@ -19,6 +33,15 @@
def region_permission_required(function):
+ """
+ This decorator can be used to make sure a view can only be retrieved by users of the requested region.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
user = request.user
@@ -32,6 +55,15 @@
return wrap
def modify_mfa_authenticated(function):
+ """
+ This decorator can be used to make sure a user can only modify his 2FA settings when he has a valid 2FA session.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):
| {"golden_diff": "diff --git a/src/cms/decorators.py b/src/cms/decorators.py\n--- a/src/cms/decorators.py\n+++ b/src/cms/decorators.py\n@@ -1,3 +1,8 @@\n+\"\"\"\n+Django view decorators can be used to restrict the execution of a view function on certain conditions.\n+\n+For more information, see :doc:`topics/http/decorators`.\n+\"\"\"\n import time\n from functools import wraps\n \n@@ -8,6 +13,15 @@\n \n \n def staff_required(function):\n+ \"\"\"\n+ This decorator can be used to make sure a view can only be retrieved by users who are either staff or superusers.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n@@ -19,6 +33,15 @@\n \n \n def region_permission_required(function):\n+ \"\"\"\n+ This decorator can be used to make sure a view can only be retrieved by users of the requested region.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n@@ -32,6 +55,15 @@\n return wrap\n \n def modify_mfa_authenticated(function):\n+ \"\"\"\n+ This decorator can be used to make sure a user can only modify his 2FA settings when he has a valid 2FA session.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):\n", "issue": "Improve documentation of view decorators\nExplain what our view decorators are doing and add docstrings of the following format:\r\n```\r\n\"\"\"\r\n[Summary]\r\n\r\n:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]\r\n:type [ParamName]: [ParamType](, optional)\r\n...\r\n:raises [ErrorType]: [ErrorDescription]\r\n...\r\n:return: [ReturnDescription]\r\n:rtype: [ReturnType]\r\n\"\"\"\r\n```\nImprove documentation of view decorators\nExplain what our view decorators are doing and add docstrings of the following format:\r\n```\r\n\"\"\"\r\n[Summary]\r\n\r\n:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]\r\n:type [ParamName]: [ParamType](, optional)\r\n...\r\n:raises [ErrorType]: [ErrorDescription]\r\n...\r\n:return: [ReturnDescription]\r\n:rtype: [ReturnType]\r\n\"\"\"\r\n```\n", "before_files": [{"content": "import time\nfrom functools import wraps\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\n\nfrom .models import Region\n\n\ndef staff_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have access to this areas\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\n\ndef region_permission_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have permissions for all regions\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n region = Region.get_current_region(request)\n if 
region in user.profile.regions.all():\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\ndef modify_mfa_authenticated(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):\n request.session['mfa_redirect_url'] = request.path\n return redirect('user_settings_auth_modify_mfa')\n return function(request, *args, **kwargs)\n return wrap\n", "path": "src/cms/decorators.py"}]} | 1,088 | 503 |
gh_patches_debug_15148 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1977 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FIX #565 (Import for apollo tracing extension requires telemetry)
## Description
FIX #565
## Types of Changes
<!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. -->
- [ ] Core
- [x] Bugfix
- [ ] New feature
- [ ] Enhancement/optimization
- [ ] Documentation
## Issues Fixed or Closed by This PR
* #565
## Checklist
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [x] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
- [x] I have read the CONTRIBUTING document.
- [ ] I have added tests to cover my changes.
- [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage).
</issue>
<code>
[start of strawberry/extensions/tracing/__init__.py]
1 from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
2 from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa
3
[end of strawberry/extensions/tracing/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/extensions/tracing/__init__.py b/strawberry/extensions/tracing/__init__.py
--- a/strawberry/extensions/tracing/__init__.py
+++ b/strawberry/extensions/tracing/__init__.py
@@ -1,2 +1,27 @@
-from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
-from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa
+import importlib
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+ from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
+ from .opentelemetry import ( # noqa
+ OpenTelemetryExtension,
+ OpenTelemetryExtensionSync,
+ )
+
+__all__ = [
+ "ApolloTracingExtension",
+ "ApolloTracingExtensionSync",
+ "OpenTelemetryExtension",
+ "OpenTelemetryExtensionSync",
+]
+
+
+def __getattr__(name: str):
+ if name in {"ApolloTracingExtension", "ApolloTracingExtensionSync"}:
+ return getattr(importlib.import_module(".apollo", __name__), name)
+
+ if name in {"OpenTelemetryExtension", "OpenTelemetryExtensionSync"}:
+ return getattr(importlib.import_module(".opentelemetry", __name__), name)
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| {"golden_diff": "diff --git a/strawberry/extensions/tracing/__init__.py b/strawberry/extensions/tracing/__init__.py\n--- a/strawberry/extensions/tracing/__init__.py\n+++ b/strawberry/extensions/tracing/__init__.py\n@@ -1,2 +1,27 @@\n-from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\n-from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa\n+import importlib\n+from typing import TYPE_CHECKING\n+\n+\n+if TYPE_CHECKING:\n+ from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\n+ from .opentelemetry import ( # noqa\n+ OpenTelemetryExtension,\n+ OpenTelemetryExtensionSync,\n+ )\n+\n+__all__ = [\n+ \"ApolloTracingExtension\",\n+ \"ApolloTracingExtensionSync\",\n+ \"OpenTelemetryExtension\",\n+ \"OpenTelemetryExtensionSync\",\n+]\n+\n+\n+def __getattr__(name: str):\n+ if name in {\"ApolloTracingExtension\", \"ApolloTracingExtensionSync\"}:\n+ return getattr(importlib.import_module(\".apollo\", __name__), name)\n+\n+ if name in {\"OpenTelemetryExtension\", \"OpenTelemetryExtensionSync\"}:\n+ return getattr(importlib.import_module(\".opentelemetry\", __name__), name)\n+\n+ raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "issue": "FIX #565 (Import for apollo tracing extension requires telemetry)\n## Description\r\n\r\nFIX #565 \r\n\r\n## Types of Changes\r\n\r\n<!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. -->\r\n- [ ] Core\r\n- [x] Bugfix\r\n- [ ] New feature\r\n- [ ] Enhancement/optimization\r\n- [ ] Documentation\r\n\r\n## Issues Fixed or Closed by This PR\r\n\r\n* #565 \r\n\r\n## Checklist\r\n\r\n<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->\r\n<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->\r\n- [x] My code follows the code style of this project.\r\n- [ ] My change requires a change to the documentation.\r\n- [ ] I have updated the documentation accordingly.\r\n- [x] I have read the CONTRIBUTING document.\r\n- [ ] I have added tests to cover my changes.\r\n- [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage).\r\n\n", "before_files": [{"content": "from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\nfrom .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa\n", "path": "strawberry/extensions/tracing/__init__.py"}]} | 822 | 328 |