| column | type | summary |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value (`rasdani/github-patches`) |
| task_type | string | 1 distinct value (`git_diff`) |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-9.01k |
| golden_diff | string | lengths 151-4.94k |
| verification_info | string | lengths 465-11.3k |
| num_tokens_prompt | int64 | 557-2.05k |
| num_tokens_diff | int64 | 48-1.02k |
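The raw records below follow this schema. As a minimal sketch of how one row might be loaded and inspected with the `datasets` library, the snippet below assumes the dataset is published on the Hugging Face Hub under the ID shown in the `source` column and that it exposes a `train` split; both are assumptions, not something this preview confirms.

```python
# Minimal sketch, not a confirmed loading recipe for this exact dataset.
# Assumptions: the Hub ID matches the `source` column and a "train" split exists.
import json
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # assumed ID and split

row = ds[0]
print(row["problem_id"], row["task_type"], row["in_source_id"])
print(row["prompt"][:300])       # issue statement plus code context handed to the model
print(row["golden_diff"][:300])  # reference patch that a generated diff is compared against

# verification_info appears to be a JSON string bundling the golden diff,
# the issue text, and the pre-patch file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```

Each record pairs a self-contained prompt (issue text, numbered file listing, and patch-format instructions) with the golden diff used for verification, as in the rows that follow.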
gh_patches_debug_10157 | rasdani/github-patches | git_diff | huggingface__transformers-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py error
attributeError: 'BertForPreTraining' object has no attribute 'global_step'
</issue>
<code>
[start of pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py]
1 # coding=utf-8
2 # Copyright 2018 The HugginFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Convert BERT checkpoint."""
16
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import os
22 import re
23 import argparse
24 import tensorflow as tf
25 import torch
26 import numpy as np
27
28 from .modeling import BertConfig, BertForPreTraining
29
30 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
31 config_path = os.path.abspath(bert_config_file)
32 tf_path = os.path.abspath(tf_checkpoint_path)
33 print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))
34 # Load weights from TF model
35 init_vars = tf.train.list_variables(tf_path)
36 names = []
37 arrays = []
38 for name, shape in init_vars:
39 print("Loading TF weight {} with shape {}".format(name, shape))
40 array = tf.train.load_variable(tf_path, name)
41 names.append(name)
42 arrays.append(array)
43
44 # Initialise PyTorch model
45 config = BertConfig.from_json_file(bert_config_file)
46 print("Building PyTorch model from configuration: {}".format(str(config)))
47 model = BertForPreTraining(config)
48
49 for name, array in zip(names, arrays):
50 name = name.split('/')
51 # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
52 # which are not required for using pretrained model
53 if any(n in ["adam_v", "adam_m"] for n in name):
54 print("Skipping {}".format("/".join(name)))
55 continue
56 pointer = model
57 for m_name in name:
58 if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
59 l = re.split(r'_(\d+)', m_name)
60 else:
61 l = [m_name]
62 if l[0] == 'kernel' or l[0] == 'gamma':
63 pointer = getattr(pointer, 'weight')
64 elif l[0] == 'output_bias' or l[0] == 'beta':
65 pointer = getattr(pointer, 'bias')
66 elif l[0] == 'output_weights':
67 pointer = getattr(pointer, 'weight')
68 else:
69 pointer = getattr(pointer, l[0])
70 if len(l) >= 2:
71 num = int(l[1])
72 pointer = pointer[num]
73 if m_name[-11:] == '_embeddings':
74 pointer = getattr(pointer, 'weight')
75 elif m_name == 'kernel':
76 array = np.transpose(array)
77 try:
78 assert pointer.shape == array.shape
79 except AssertionError as e:
80 e.args += (pointer.shape, array.shape)
81 raise
82 print("Initialize PyTorch weight {}".format(name))
83 pointer.data = torch.from_numpy(array)
84
85 # Save pytorch-model
86 print("Save PyTorch model to {}".format(pytorch_dump_path))
87 torch.save(model.state_dict(), pytorch_dump_path)
88
89
90 if __name__ == "__main__":
91 parser = argparse.ArgumentParser()
92 ## Required parameters
93 parser.add_argument("--tf_checkpoint_path",
94 default = None,
95 type = str,
96 required = True,
97 help = "Path the TensorFlow checkpoint path.")
98 parser.add_argument("--bert_config_file",
99 default = None,
100 type = str,
101 required = True,
102 help = "The config json file corresponding to the pre-trained BERT model. \n"
103 "This specifies the model architecture.")
104 parser.add_argument("--pytorch_dump_path",
105 default = None,
106 type = str,
107 required = True,
108 help = "Path to the output PyTorch model.")
109 args = parser.parse_args()
110 convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
111 args.bert_config_file,
112 args.pytorch_dump_path)
113
[end of pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py
--- a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py
+++ b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py
@@ -50,7 +50,7 @@
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
- if any(n in ["adam_v", "adam_m"] for n in name):
+ if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
| {"golden_diff": "diff --git a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n--- a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n+++ b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n@@ -50,7 +50,7 @@\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n- if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n+ if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n", "issue": "pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py error\nattributeError: 'BertForPreTraining' object has no attribute 'global_step'\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HugginFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert BERT checkpoint.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport argparse\nimport tensorflow as tf\nimport torch\nimport numpy as np\n\nfrom .modeling import BertConfig, BertForPreTraining\n\ndef convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):\n config_path = os.path.abspath(bert_config_file)\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {} with config at {}\".format(tf_path, config_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n # Initialise PyTorch model\n config = BertConfig.from_json_file(bert_config_file)\n print(\"Building PyTorch model from configuration: {}\".format(str(config)))\n model = BertForPreTraining(config)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n 
assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n\n # Save pytorch-model\n print(\"Save PyTorch model to {}\".format(pytorch_dump_path))\n torch.save(model.state_dict(), pytorch_dump_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n ## Required parameters\n parser.add_argument(\"--tf_checkpoint_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path the TensorFlow checkpoint path.\")\n parser.add_argument(\"--bert_config_file\",\n default = None,\n type = str,\n required = True,\n help = \"The config json file corresponding to the pre-trained BERT model. \\n\"\n \"This specifies the model architecture.\")\n parser.add_argument(\"--pytorch_dump_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path to the output PyTorch model.\")\n args = parser.parse_args()\n convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,\n args.bert_config_file,\n args.pytorch_dump_path)\n", "path": "pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py"}]} | 1,766 | 181 |
gh_patches_debug_57079 | rasdani/github-patches | git_diff | searx__searx-672 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infinite scroll: answer are repeated on each page
How to reproduce : search for ["user agent"](https://searx.me/?q=user+agent) with Infinite scroll activated.
Should the answer be disabled except the first page ? or should Infinite Scroll hide the answer ?
I vote for the first option : disabled answers except on the first page on the server side.
</issue>
<code>
[start of searx/plugins/self_info.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17 from flask_babel import gettext
18 import re
19 name = "Self Informations"
20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
21 default_on = True
22
23
24 # Self User Agent regex
25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
26
27
28 # attach callback to the post search hook
29 # request: flask request object
30 # ctx: the whole local context of the pre search hook
31 def post_search(request, ctx):
32 if ctx['search'].query == 'ip':
33 x_forwarded_for = request.headers.getlist("X-Forwarded-For")
34 if x_forwarded_for:
35 ip = x_forwarded_for[0]
36 else:
37 ip = request.remote_addr
38 ctx['search'].result_container.answers.clear()
39 ctx['search'].result_container.answers.add(ip)
40 elif p.match(ctx['search'].query):
41 ua = request.user_agent
42 ctx['search'].result_container.answers.clear()
43 ctx['search'].result_container.answers.add(ua)
44 return True
45
[end of searx/plugins/self_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -29,6 +29,8 @@
# request: flask request object
# ctx: the whole local context of the pre search hook
def post_search(request, ctx):
+ if ctx['search'].pageno > 1:
+ return True
if ctx['search'].query == 'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
| {"golden_diff": "diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -29,6 +29,8 @@\n # request: flask request object\n # ctx: the whole local context of the pre search hook\n def post_search(request, ctx):\n+ if ctx['search'].pageno > 1:\n+ return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n", "issue": "Infinite scroll: answer are repeated on each page\nHow to reproduce : search for [\"user agent\"](https://searx.me/?q=user+agent) with Infinite scroll activated.\n\nShould the answer be disabled except the first page ? or should Infinite Scroll hide the answer ?\n\nI vote for the first option : disabled answers except on the first page on the server side. \n\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n", "path": "searx/plugins/self_info.py"}]} | 1,092 | 136 |
gh_patches_debug_6064 | rasdani/github-patches | git_diff | benoitc__gunicorn-1441 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Supporting newest version of python
Gunicorn currently doesn't run tests with python3.6.
Since 3.6 is release and some of us are preparing to use it in production it would be great if gunicorn had confirmed support.
Also `setup.py` classifiers doesn't include 3.5 or 3.6.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import sys
8
9 from setuptools import setup, find_packages
10 from setuptools.command.test import test as TestCommand
11
12 from gunicorn import __version__
13
14
15 CLASSIFIERS = [
16 'Development Status :: 4 - Beta',
17 'Environment :: Other Environment',
18 'Intended Audience :: Developers',
19 'License :: OSI Approved :: MIT License',
20 'Operating System :: MacOS :: MacOS X',
21 'Operating System :: POSIX',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 2',
24 'Programming Language :: Python :: 2.6',
25 'Programming Language :: Python :: 2.7',
26 'Programming Language :: Python :: 3',
27 'Programming Language :: Python :: 3.2',
28 'Programming Language :: Python :: 3.3',
29 'Programming Language :: Python :: 3.4',
30 'Topic :: Internet',
31 'Topic :: Utilities',
32 'Topic :: Software Development :: Libraries :: Python Modules',
33 'Topic :: Internet :: WWW/HTTP',
34 'Topic :: Internet :: WWW/HTTP :: WSGI',
35 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
36 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
37
38 # read long description
39 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
40 long_description = f.read()
41
42 # read dev requirements
43 fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')
44 with open(fname) as f:
45 tests_require = [l.strip() for l in f.readlines()]
46
47 if sys.version_info[:2] < (3, 3):
48 tests_require.append('mock')
49 if sys.version_info[:2] < (2, 7):
50 tests_require.append('unittest2')
51
52 class PyTestCommand(TestCommand):
53 user_options = [
54 ("cov", None, "measure coverage")
55 ]
56
57 def initialize_options(self):
58 TestCommand.initialize_options(self)
59 self.cov = None
60
61 def finalize_options(self):
62 TestCommand.finalize_options(self)
63 self.test_args = ['tests']
64 if self.cov:
65 self.test_args += ['--cov', 'gunicorn']
66 self.test_suite = True
67
68 def run_tests(self):
69 import pytest
70 errno = pytest.main(self.test_args)
71 sys.exit(errno)
72
73 setup(
74 name='gunicorn',
75 version=__version__,
76
77 description='WSGI HTTP Server for UNIX',
78 long_description=long_description,
79 author='Benoit Chesneau',
80 author_email='[email protected]',
81 license='MIT',
82 url='http://gunicorn.org',
83
84 classifiers=CLASSIFIERS,
85 zip_safe=False,
86 packages=find_packages(exclude=['examples', 'tests']),
87 include_package_data=True,
88
89 tests_require=tests_require,
90 cmdclass={'test': PyTestCommand},
91
92 entry_points="""
93 [console_scripts]
94 gunicorn=gunicorn.app.wsgiapp:run
95 gunicorn_django=gunicorn.app.djangoapp:run
96 gunicorn_paster=gunicorn.app.pasterapp:run
97
98 [paste.server_runner]
99 main=gunicorn.app.pasterapp:paste_server
100 """
101 )
102
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,8 @@
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,6 +27,8 @@\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n", "issue": "Supporting newest version of python\nGunicorn currently doesn't run tests with python3.6.\r\n\r\nSince 3.6 is release and some of us are preparing to use it in production it would be great if gunicorn had confirmed support.\r\n\r\nAlso `setup.py` classifiers doesn't include 3.5 or 3.6.\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nif sys.version_info[:2] < (3, 3):\n tests_require.append('mock')\nif sys.version_info[:2] < (2, 7):\n tests_require.append('unittest2')\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='http://gunicorn.org',\n\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n gunicorn_django=gunicorn.app.djangoapp:run\n gunicorn_paster=gunicorn.app.pasterapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:paste_server\n 
\"\"\"\n)\n", "path": "setup.py"}]} | 1,513 | 119 |
gh_patches_debug_20410 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-1692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `importlib.metadata` to get package version instead of `pkg_resources`
Now that we're using Python 3.8+, we should switch to using `importlib.metadata` to get our version at runtime in `plasmapy/__init__.py`. We're using `pkg_resources` right now, but that has a "[significant runtime cost](https://github.com/pypa/setuptools_scm/#retrieving-package-version-at-runtime)".
</issue>
<code>
[start of plasmapy/__init__.py]
1 """
2 Welcome to the `plasmapy` package, an open source community-developed Python
3 package for the plasma community. Documentation is available in the docstrings
4 and online at https://docs.plasmapy.org (accessible also using the
5 :func:`~plasmapy.online_help` function).
6 """
7 __all__ = [
8 "online_help",
9 "analysis",
10 "diagnostics",
11 "dispersion",
12 "formulary",
13 "particles",
14 "plasma",
15 "simulation",
16 "utils",
17 "__version__",
18 "__citation__",
19 ]
20
21 # Enforce Python version check during package import.
22 # This is the same check as the one at the top of setup.py
23 import sys
24
25 if sys.version_info < (3, 8): # coverage: ignore
26 raise ImportError("PlasmaPy does not support Python < 3.8")
27
28 # Packages may add whatever they like to this file, but
29 # should keep this content at the top.
30 # ----------------------------------------------------------------------------
31 import pkg_resources
32
33 from plasmapy import (
34 analysis,
35 diagnostics,
36 dispersion,
37 formulary,
38 particles,
39 plasma,
40 simulation,
41 utils,
42 )
43
44 # define version
45 try:
46 # this places a runtime dependency on setuptools
47 #
48 # note: if there's any distribution metadata in your source files, then this
49 # will find a version based on those files. Keep distribution metadata
50 # out of your repository unless you've intentionally installed the package
51 # as editable (e.g. `pip install -e {plasmapy_directory_root}`),
52 # but then __version__ will not be updated with each commit, it is
53 # frozen to the version at time of install.
54 #
55 #: PlasmaPy version string
56 __version__ = pkg_resources.get_distribution("plasmapy").version
57 except pkg_resources.DistributionNotFound:
58 # package is not installed
59 fallback_version = "unknown"
60 try:
61 # code most likely being used from source
62 # if setuptools_scm is installed then generate a version
63 from setuptools_scm import get_version
64
65 __version__ = get_version(
66 root="..", relative_to=__file__, fallback_version=fallback_version
67 )
68 del get_version
69 warn_add = "setuptools_scm failed to detect the version"
70 except ModuleNotFoundError:
71 # setuptools_scm is not installed
72 __version__ = fallback_version
73 warn_add = "setuptools_scm is not installed"
74
75 if __version__ == fallback_version:
76 from warnings import warn
77
78 warn(
79 f"plasmapy.__version__ not generated (set to 'unknown'), PlasmaPy is "
80 f"not an installed package and {warn_add}.",
81 RuntimeWarning,
82 )
83
84 del warn
85 del fallback_version, warn_add
86
87 # ----------------------------------------------------------------------------
88 #: PlasmaPy citation instructions
89 __citation__ = (
90 "Instructions on how to cite and acknowledge PlasmaPy are provided in the "
91 "online documentation at: http://docs.plasmapy.org/en/stable/about/citation.html"
92 )
93
94
95 def online_help(query: str):
96 """
97 Open a webpage containing a search page in `PlasmaPy's documentation`_,
98 or another page that contains relevant online help.
99
100 This function requires an active internet connection, and will open
101 the page in the default web browser.
102
103 Parameters
104 ----------
105 query : str
106 The search query.
107 """
108 import webbrowser
109
110 from urllib.parse import urlencode
111
112 url = (
113 "http://docs.plasmapy.org/en/stable/search.html?"
114 "{}&check_keywords=yes&area=default"
115 ).format(urlencode({"q": query}))
116
117 if query.lower() in ("unit", "units", "quantity", "quantities"):
118 url = "http://docs.astropy.org/en/stable/units/"
119
120 webbrowser.open(url)
121
122
123 del pkg_resources, sys
124
[end of plasmapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -28,7 +28,7 @@
# Packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
-import pkg_resources
+from importlib.metadata import PackageNotFoundError, version
from plasmapy import (
analysis,
@@ -53,8 +53,8 @@
# frozen to the version at time of install.
#
#: PlasmaPy version string
- __version__ = pkg_resources.get_distribution("plasmapy").version
-except pkg_resources.DistributionNotFound:
+ __version__ = version("plasmapy")
+except PackageNotFoundError:
# package is not installed
fallback_version = "unknown"
try:
@@ -120,4 +120,4 @@
webbrowser.open(url)
-del pkg_resources, sys
+del sys
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -28,7 +28,7 @@\n # Packages may add whatever they like to this file, but\n # should keep this content at the top.\n # ----------------------------------------------------------------------------\n-import pkg_resources\n+from importlib.metadata import PackageNotFoundError, version\n \n from plasmapy import (\n analysis,\n@@ -53,8 +53,8 @@\n # frozen to the version at time of install.\n #\n #: PlasmaPy version string\n- __version__ = pkg_resources.get_distribution(\"plasmapy\").version\n-except pkg_resources.DistributionNotFound:\n+ __version__ = version(\"plasmapy\")\n+except PackageNotFoundError:\n # package is not installed\n fallback_version = \"unknown\"\n try:\n@@ -120,4 +120,4 @@\n webbrowser.open(url)\n \n \n-del pkg_resources, sys\n+del sys\n", "issue": "Use `importlib.metadata` to get package version instead of `pkg_resources`\nNow that we're using Python 3.8+, we should switch to using `importlib.metadata` to get our version at runtime in `plasmapy/__init__.py`. We're using `pkg_resources` right now, but that has a \"[significant runtime cost](https://github.com/pypa/setuptools_scm/#retrieving-package-version-at-runtime)\".\n", "before_files": [{"content": "\"\"\"\nWelcome to the `plasmapy` package, an open source community-developed Python\npackage for the plasma community. Documentation is available in the docstrings\nand online at https://docs.plasmapy.org (accessible also using the\n:func:`~plasmapy.online_help` function).\n\"\"\"\n__all__ = [\n \"online_help\",\n \"analysis\",\n \"diagnostics\",\n \"dispersion\",\n \"formulary\",\n \"particles\",\n \"plasma\",\n \"simulation\",\n \"utils\",\n \"__version__\",\n \"__citation__\",\n]\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\nif sys.version_info < (3, 8): # coverage: ignore\n raise ImportError(\"PlasmaPy does not support Python < 3.8\")\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nimport pkg_resources\n\nfrom plasmapy import (\n analysis,\n diagnostics,\n dispersion,\n formulary,\n particles,\n plasma,\n simulation,\n utils,\n)\n\n# define version\ntry:\n # this places a runtime dependency on setuptools\n #\n # note: if there's any distribution metadata in your source files, then this\n # will find a version based on those files. Keep distribution metadata\n # out of your repository unless you've intentionally installed the package\n # as editable (e.g. 
`pip install -e {plasmapy_directory_root}`),\n # but then __version__ will not be updated with each commit, it is\n # frozen to the version at time of install.\n #\n #: PlasmaPy version string\n __version__ = pkg_resources.get_distribution(\"plasmapy\").version\nexcept pkg_resources.DistributionNotFound:\n # package is not installed\n fallback_version = \"unknown\"\n try:\n # code most likely being used from source\n # if setuptools_scm is installed then generate a version\n from setuptools_scm import get_version\n\n __version__ = get_version(\n root=\"..\", relative_to=__file__, fallback_version=fallback_version\n )\n del get_version\n warn_add = \"setuptools_scm failed to detect the version\"\n except ModuleNotFoundError:\n # setuptools_scm is not installed\n __version__ = fallback_version\n warn_add = \"setuptools_scm is not installed\"\n\n if __version__ == fallback_version:\n from warnings import warn\n\n warn(\n f\"plasmapy.__version__ not generated (set to 'unknown'), PlasmaPy is \"\n f\"not an installed package and {warn_add}.\",\n RuntimeWarning,\n )\n\n del warn\n del fallback_version, warn_add\n\n# ----------------------------------------------------------------------------\n#: PlasmaPy citation instructions\n__citation__ = (\n \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n \"online documentation at: http://docs.plasmapy.org/en/stable/about/citation.html\"\n)\n\n\ndef online_help(query: str):\n \"\"\"\n Open a webpage containing a search page in `PlasmaPy's documentation`_,\n or another page that contains relevant online help.\n\n This function requires an active internet connection, and will open\n the page in the default web browser.\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n import webbrowser\n\n from urllib.parse import urlencode\n\n url = (\n \"http://docs.plasmapy.org/en/stable/search.html?\"\n \"{}&check_keywords=yes&area=default\"\n ).format(urlencode({\"q\": query}))\n\n if query.lower() in (\"unit\", \"units\", \"quantity\", \"quantities\"):\n url = \"http://docs.astropy.org/en/stable/units/\"\n\n webbrowser.open(url)\n\n\ndel pkg_resources, sys\n", "path": "plasmapy/__init__.py"}]} | 1,749 | 231 |
gh_patches_debug_41922 | rasdani/github-patches | git_diff | spack__spack-851 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
spack can't bootstrap from release tarball
Spack release tarballs don't include `.git` in the top directory like a clone of the repo would. The bootstrap relies on this to bootrstrap a copy from github:
```
[jawestlu@master4-centos71 spack-0.8.17]$ ./bin/spack bootstrap /tmp/
==> Error: command '/bin/git --git-dir=/mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/.git config --get remote.origin.url' returned error code 1
[jawestlu@master4-centos71 spack-0.8.17]$ ls -la /mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/
total 52
drwxr-xr-x 6 jawestlu jawestlu 4096 Jan 13 15:21 .
drwxr-xr-x 14 jawestlu jawestlu 4096 Jan 13 15:16 ..
-rw-r--r-- 1 jawestlu jawestlu 106 Mar 24 2015 .gitignore
-rw-r--r-- 1 jawestlu jawestlu 20309 Mar 24 2015 LICENSE
-rw-r--r-- 1 jawestlu jawestlu 2894 Mar 24 2015 README.md
drwxr-xr-x 2 jawestlu jawestlu 4096 Mar 24 2015 bin
drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 lib
drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 share
drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 var
```
</issue>
<code>
[start of lib/spack/spack/cmd/bootstrap.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 import os
26 from subprocess import check_call
27
28 import llnl.util.tty as tty
29 from llnl.util.filesystem import join_path, mkdirp
30
31 import spack
32 from spack.util.executable import which
33
34 description = "Create a new installation of spack in another prefix"
35
36 def setup_parser(subparser):
37 subparser.add_argument('prefix', help="names of prefix where we should install spack")
38
39
40 def get_origin_url():
41 git_dir = join_path(spack.prefix, '.git')
42 git = which('git', required=True)
43 origin_url = git(
44 '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',
45 output=str)
46 return origin_url.strip()
47
48
49 def bootstrap(parser, args):
50 origin_url = get_origin_url()
51 prefix = args.prefix
52
53 tty.msg("Fetching spack from origin: %s" % origin_url)
54
55 if os.path.isfile(prefix):
56 tty.die("There is already a file at %s" % prefix)
57
58 mkdirp(prefix)
59
60 if os.path.exists(join_path(prefix, '.git')):
61 tty.die("There already seems to be a git repository in %s" % prefix)
62
63 files_in_the_way = os.listdir(prefix)
64 if files_in_the_way:
65 tty.die("There are already files there! Delete these files before boostrapping spack.",
66 *files_in_the_way)
67
68 tty.msg("Installing:",
69 "%s/bin/spack" % prefix,
70 "%s/lib/spack/..." % prefix)
71
72 os.chdir(prefix)
73 git = which('git', required=True)
74 git('init', '--shared', '-q')
75 git('remote', 'add', 'origin', origin_url)
76 git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')
77 git('reset', '--hard', 'origin/master', '-q')
78
79 tty.msg("Successfully created a new spack in %s" % prefix,
80 "Run %s/bin/spack to use this installation." % prefix)
81
[end of lib/spack/spack/cmd/bootstrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py
--- a/lib/spack/spack/cmd/bootstrap.py
+++ b/lib/spack/spack/cmd/bootstrap.py
@@ -23,7 +23,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
-from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, mkdirp
@@ -31,26 +30,49 @@
import spack
from spack.util.executable import which
+_SPACK_UPSTREAM = 'https://github.com/llnl/spack'
+
description = "Create a new installation of spack in another prefix"
+
def setup_parser(subparser):
- subparser.add_argument('prefix', help="names of prefix where we should install spack")
+ subparser.add_argument(
+ '-r', '--remote', action='store', dest='remote',
+ help="name of the remote to bootstrap from", default='origin')
+ subparser.add_argument(
+ 'prefix',
+ help="names of prefix where we should install spack")
-def get_origin_url():
+def get_origin_info(remote):
git_dir = join_path(spack.prefix, '.git')
git = which('git', required=True)
- origin_url = git(
- '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',
- output=str)
- return origin_url.strip()
+ try:
+ branch = git('symbolic-ref', '--short', 'HEAD', output=str)
+ except ProcessError:
+ branch = 'develop'
+ tty.warn('No branch found; using default branch: %s' % branch)
+ if remote == 'origin' and \
+ branch not in ('master', 'develop'):
+ branch = 'develop'
+ tty.warn('Unknown branch found; using default branch: %s' % branch)
+ try:
+ origin_url = git(
+ '--git-dir=%s' % git_dir,
+ 'config', '--get', 'remote.%s.url' % remote,
+ output=str)
+ except ProcessError:
+ origin_url = _SPACK_UPSTREAM
+ tty.warn('No git repository found; '
+ 'using default upstream URL: %s' % origin_url)
+ return (origin_url.strip(), branch.strip())
def bootstrap(parser, args):
- origin_url = get_origin_url()
+ origin_url, branch = get_origin_info(args.remote)
prefix = args.prefix
- tty.msg("Fetching spack from origin: %s" % origin_url)
+ tty.msg("Fetching spack from '%s': %s" % (args.remote, origin_url))
if os.path.isfile(prefix):
tty.die("There is already a file at %s" % prefix)
@@ -62,7 +84,8 @@
files_in_the_way = os.listdir(prefix)
if files_in_the_way:
- tty.die("There are already files there! Delete these files before boostrapping spack.",
+ tty.die("There are already files there! "
+ "Delete these files before boostrapping spack.",
*files_in_the_way)
tty.msg("Installing:",
@@ -73,8 +96,10 @@
git = which('git', required=True)
git('init', '--shared', '-q')
git('remote', 'add', 'origin', origin_url)
- git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')
- git('reset', '--hard', 'origin/master', '-q')
+ git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),
+ '-n', '-q')
+ git('reset', '--hard', 'origin/%s' % branch, '-q')
+ git('checkout', '-B', branch, 'origin/%s' % branch, '-q')
tty.msg("Successfully created a new spack in %s" % prefix,
"Run %s/bin/spack to use this installation." % prefix)
| {"golden_diff": "diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py\n--- a/lib/spack/spack/cmd/bootstrap.py\n+++ b/lib/spack/spack/cmd/bootstrap.py\n@@ -23,7 +23,6 @@\n # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n ##############################################################################\n import os\n-from subprocess import check_call\n \n import llnl.util.tty as tty\n from llnl.util.filesystem import join_path, mkdirp\n@@ -31,26 +30,49 @@\n import spack\n from spack.util.executable import which\n \n+_SPACK_UPSTREAM = 'https://github.com/llnl/spack'\n+\n description = \"Create a new installation of spack in another prefix\"\n \n+\n def setup_parser(subparser):\n- subparser.add_argument('prefix', help=\"names of prefix where we should install spack\")\n+ subparser.add_argument(\n+ '-r', '--remote', action='store', dest='remote',\n+ help=\"name of the remote to bootstrap from\", default='origin')\n+ subparser.add_argument(\n+ 'prefix',\n+ help=\"names of prefix where we should install spack\")\n \n \n-def get_origin_url():\n+def get_origin_info(remote):\n git_dir = join_path(spack.prefix, '.git')\n git = which('git', required=True)\n- origin_url = git(\n- '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',\n- output=str)\n- return origin_url.strip()\n+ try:\n+ branch = git('symbolic-ref', '--short', 'HEAD', output=str)\n+ except ProcessError:\n+ branch = 'develop'\n+ tty.warn('No branch found; using default branch: %s' % branch)\n+ if remote == 'origin' and \\\n+ branch not in ('master', 'develop'):\n+ branch = 'develop'\n+ tty.warn('Unknown branch found; using default branch: %s' % branch)\n+ try:\n+ origin_url = git(\n+ '--git-dir=%s' % git_dir,\n+ 'config', '--get', 'remote.%s.url' % remote,\n+ output=str)\n+ except ProcessError:\n+ origin_url = _SPACK_UPSTREAM\n+ tty.warn('No git repository found; '\n+ 'using default upstream URL: %s' % origin_url)\n+ return (origin_url.strip(), branch.strip())\n \n \n def bootstrap(parser, args):\n- origin_url = get_origin_url()\n+ origin_url, branch = get_origin_info(args.remote)\n prefix = args.prefix\n \n- tty.msg(\"Fetching spack from origin: %s\" % origin_url)\n+ tty.msg(\"Fetching spack from '%s': %s\" % (args.remote, origin_url))\n \n if os.path.isfile(prefix):\n tty.die(\"There is already a file at %s\" % prefix)\n@@ -62,7 +84,8 @@\n \n files_in_the_way = os.listdir(prefix)\n if files_in_the_way:\n- tty.die(\"There are already files there! Delete these files before boostrapping spack.\",\n+ tty.die(\"There are already files there! \"\n+ \"Delete these files before boostrapping spack.\",\n *files_in_the_way)\n \n tty.msg(\"Installing:\",\n@@ -73,8 +96,10 @@\n git = which('git', required=True)\n git('init', '--shared', '-q')\n git('remote', 'add', 'origin', origin_url)\n- git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')\n- git('reset', '--hard', 'origin/master', '-q')\n+ git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),\n+ '-n', '-q')\n+ git('reset', '--hard', 'origin/%s' % branch, '-q')\n+ git('checkout', '-B', branch, 'origin/%s' % branch, '-q')\n \n tty.msg(\"Successfully created a new spack in %s\" % prefix,\n \"Run %s/bin/spack to use this installation.\" % prefix)\n", "issue": "spack can't bootstrap from release tarball\nSpack release tarballs don't include `.git` in the top directory like a clone of the repo would. 
The bootstrap relies on this to bootrstrap a copy from github:\n\n```\n[jawestlu@master4-centos71 spack-0.8.17]$ ./bin/spack bootstrap /tmp/\n==> Error: command '/bin/git --git-dir=/mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/.git config --get remote.origin.url' returned error code 1\n[jawestlu@master4-centos71 spack-0.8.17]$ ls -la /mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/\ntotal 52\ndrwxr-xr-x 6 jawestlu jawestlu 4096 Jan 13 15:21 .\ndrwxr-xr-x 14 jawestlu jawestlu 4096 Jan 13 15:16 ..\n-rw-r--r-- 1 jawestlu jawestlu 106 Mar 24 2015 .gitignore\n-rw-r--r-- 1 jawestlu jawestlu 20309 Mar 24 2015 LICENSE\n-rw-r--r-- 1 jawestlu jawestlu 2894 Mar 24 2015 README.md\ndrwxr-xr-x 2 jawestlu jawestlu 4096 Mar 24 2015 bin\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 lib\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 share\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 var\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nimport os\nfrom subprocess import check_call\n\nimport llnl.util.tty as tty\nfrom llnl.util.filesystem import join_path, mkdirp\n\nimport spack\nfrom spack.util.executable import which\n\ndescription = \"Create a new installation of spack in another prefix\"\n\ndef setup_parser(subparser):\n subparser.add_argument('prefix', help=\"names of prefix where we should install spack\")\n\n\ndef get_origin_url():\n git_dir = join_path(spack.prefix, '.git')\n git = which('git', required=True)\n origin_url = git(\n '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',\n output=str)\n return origin_url.strip()\n\n\ndef bootstrap(parser, args):\n origin_url = get_origin_url()\n prefix = args.prefix\n\n tty.msg(\"Fetching spack from origin: %s\" % origin_url)\n\n if os.path.isfile(prefix):\n tty.die(\"There is already a file at %s\" % prefix)\n\n mkdirp(prefix)\n\n if os.path.exists(join_path(prefix, '.git')):\n tty.die(\"There already seems to be a git repository in %s\" % prefix)\n\n files_in_the_way = os.listdir(prefix)\n if files_in_the_way:\n tty.die(\"There are already files there! 
Delete these files before boostrapping spack.\",\n *files_in_the_way)\n\n tty.msg(\"Installing:\",\n \"%s/bin/spack\" % prefix,\n \"%s/lib/spack/...\" % prefix)\n\n os.chdir(prefix)\n git = which('git', required=True)\n git('init', '--shared', '-q')\n git('remote', 'add', 'origin', origin_url)\n git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')\n git('reset', '--hard', 'origin/master', '-q')\n\n tty.msg(\"Successfully created a new spack in %s\" % prefix,\n \"Run %s/bin/spack to use this installation.\" % prefix)\n", "path": "lib/spack/spack/cmd/bootstrap.py"}]} | 1,910 | 950 |
gh_patches_debug_15365 | rasdani/github-patches | git_diff | uclapi__uclapi-1028 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Medium Articles Bug
Bug in getting medium articles on the homepage
</issue>
<code>
[start of backend/uclapi/dashboard/app_helpers.py]
1 from binascii import hexlify
2 from random import SystemRandom
3
4 from common.helpers import generate_api_token
5 from uclapi.settings import (
6 MEDIUM_ARTICLE_QUANTITY,
7 REDIS_UCLAPI_HOST
8 )
9
10 import os
11 import redis
12 import textwrap
13 import validators
14
15
16 def get_articles():
17 r = redis.Redis(host=REDIS_UCLAPI_HOST)
18 pipe = r.pipeline()
19 articles = []
20 for i in range(0, MEDIUM_ARTICLE_QUANTITY):
21 articles.append({})
22 redis_key_url = "Blog:item:{}:url".format(i)
23 redis_key_title = "Blog:item:{}:title".format(i)
24 pipe.get(redis_key_url)
25 pipe.get(redis_key_title)
26 redis_response = pipe.execute()
27 for i in range(0, MEDIUM_ARTICLE_QUANTITY):
28 articles[i]['url'] = redis_response[i*2].decode("utf-8")
29 articles[i]['title'] = redis_response[i*2+1].decode("utf-8")
30 return articles
31
32
33 def generate_temp_api_token():
34 return generate_api_token("temp")
35
36
37 def get_temp_token():
38 r = redis.Redis(host=REDIS_UCLAPI_HOST)
39
40 token = generate_temp_api_token()
41 # We initialise a new temporary token and set it to 1
42 # as it is generated at its first usage.
43 r.set(token, 1, 600)
44 return token
45
46
47 def generate_app_id():
48 key = hexlify(os.urandom(5)).decode()
49 final = "A" + key
50
51 return final
52
53
54 def generate_app_client_id():
55 sr = SystemRandom()
56
57 client_id = '{}.{}'.format(
58 ''.join(str(sr.randint(0, 9)) for _ in range(16)),
59 ''.join(str(sr.randint(0, 9)) for _ in range(16))
60 )
61
62 return client_id
63
64
65 def generate_app_client_secret():
66 client_secret = hexlify(os.urandom(32)).decode()
67 return client_secret
68
69
70 def is_url_safe(url):
71 if not url.startswith("https://"):
72 return False
73
74 if not validators.url(url, public=True):
75 return False
76
77 whitelist_urls = os.environ["WHITELISTED_CALLBACK_URLS"].split(';')
78 if url in whitelist_urls:
79 return True
80
81 forbidden_urls = os.environ["FORBIDDEN_CALLBACK_URLS"].split(';')
82 for furl in forbidden_urls:
83 if furl in url:
84 return False
85
86 return True
87
88
89 def generate_secret():
90 key = hexlify(os.urandom(30)).decode()
91 dashed = '-'.join(textwrap.wrap(key, 15))
92
93 return dashed
94
[end of backend/uclapi/dashboard/app_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py
--- a/backend/uclapi/dashboard/app_helpers.py
+++ b/backend/uclapi/dashboard/app_helpers.py
@@ -4,9 +4,10 @@
from common.helpers import generate_api_token
from uclapi.settings import (
MEDIUM_ARTICLE_QUANTITY,
- REDIS_UCLAPI_HOST
+ REDIS_UCLAPI_HOST,
+ DEBUG
)
-
+from django.core.management import call_command
import os
import redis
import textwrap
@@ -15,6 +16,11 @@
def get_articles():
r = redis.Redis(host=REDIS_UCLAPI_HOST)
+ if not r.exists("Blog:item:1:url"):
+ if DEBUG:
+ call_command('update_medium')
+ else:
+ return []
pipe = r.pipeline()
articles = []
for i in range(0, MEDIUM_ARTICLE_QUANTITY):
| {"golden_diff": "diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py\n--- a/backend/uclapi/dashboard/app_helpers.py\n+++ b/backend/uclapi/dashboard/app_helpers.py\n@@ -4,9 +4,10 @@\n from common.helpers import generate_api_token\n from uclapi.settings import (\n MEDIUM_ARTICLE_QUANTITY,\n- REDIS_UCLAPI_HOST\n+ REDIS_UCLAPI_HOST,\n+ DEBUG\n )\n-\n+from django.core.management import call_command\n import os\n import redis\n import textwrap\n@@ -15,6 +16,11 @@\n \n def get_articles():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n+ if not r.exists(\"Blog:item:1:url\"):\n+ if DEBUG:\n+ call_command('update_medium')\n+ else:\n+ return []\n pipe = r.pipeline()\n articles = []\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n", "issue": "Medium Articles Bug\nBug in getting medium articles on the homepage\n", "before_files": [{"content": "from binascii import hexlify\nfrom random import SystemRandom\n\nfrom common.helpers import generate_api_token\nfrom uclapi.settings import (\n MEDIUM_ARTICLE_QUANTITY,\n REDIS_UCLAPI_HOST\n)\n\nimport os\nimport redis\nimport textwrap\nimport validators\n\n\ndef get_articles():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n pipe = r.pipeline()\n articles = []\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles.append({})\n redis_key_url = \"Blog:item:{}:url\".format(i)\n redis_key_title = \"Blog:item:{}:title\".format(i)\n pipe.get(redis_key_url)\n pipe.get(redis_key_title)\n redis_response = pipe.execute()\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles[i]['url'] = redis_response[i*2].decode(\"utf-8\")\n articles[i]['title'] = redis_response[i*2+1].decode(\"utf-8\")\n return articles\n\n\ndef generate_temp_api_token():\n return generate_api_token(\"temp\")\n\n\ndef get_temp_token():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n\n token = generate_temp_api_token()\n # We initialise a new temporary token and set it to 1\n # as it is generated at its first usage.\n r.set(token, 1, 600)\n return token\n\n\ndef generate_app_id():\n key = hexlify(os.urandom(5)).decode()\n final = \"A\" + key\n\n return final\n\n\ndef generate_app_client_id():\n sr = SystemRandom()\n\n client_id = '{}.{}'.format(\n ''.join(str(sr.randint(0, 9)) for _ in range(16)),\n ''.join(str(sr.randint(0, 9)) for _ in range(16))\n )\n\n return client_id\n\n\ndef generate_app_client_secret():\n client_secret = hexlify(os.urandom(32)).decode()\n return client_secret\n\n\ndef is_url_safe(url):\n if not url.startswith(\"https://\"):\n return False\n\n if not validators.url(url, public=True):\n return False\n\n whitelist_urls = os.environ[\"WHITELISTED_CALLBACK_URLS\"].split(';')\n if url in whitelist_urls:\n return True\n\n forbidden_urls = os.environ[\"FORBIDDEN_CALLBACK_URLS\"].split(';')\n for furl in forbidden_urls:\n if furl in url:\n return False\n\n return True\n\n\ndef generate_secret():\n key = hexlify(os.urandom(30)).decode()\n dashed = '-'.join(textwrap.wrap(key, 15))\n\n return dashed\n", "path": "backend/uclapi/dashboard/app_helpers.py"}]} | 1,323 | 212 |
gh_patches_debug_1224 | rasdani/github-patches | git_diff | projectmesa__mesa-826 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Push new Mesa release
We are overdue for an official release. Before I push one, does anyone have anything they really want to get in, or should I just tag and release? 
Discuss.
</issue>
<code>
[start of mesa/__init__.py]
1 # -*- coding: utf-8 -*-
2 """
3 Mesa Agent-Based Modeling Framework
4
5 Core Objects: Model, and Agent.
6
7 """
8 import datetime
9
10 from .model import Model
11 from .agent import Agent
12
13
14 __all__ = ["Model", "Agent"]
15
16 __title__ = "mesa"
17 __version__ = "0.8.6"
18 __license__ = "Apache 2.0"
19 __copyright__ = "Copyright %s Project Mesa Team" % datetime.date.today().year
20
[end of mesa/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesa/__init__.py b/mesa/__init__.py
--- a/mesa/__init__.py
+++ b/mesa/__init__.py
@@ -14,6 +14,6 @@
__all__ = ["Model", "Agent"]
__title__ = "mesa"
-__version__ = "0.8.6"
+__version__ = "0.8.7"
__license__ = "Apache 2.0"
__copyright__ = "Copyright %s Project Mesa Team" % datetime.date.today().year
| {"golden_diff": "diff --git a/mesa/__init__.py b/mesa/__init__.py\n--- a/mesa/__init__.py\n+++ b/mesa/__init__.py\n@@ -14,6 +14,6 @@\n __all__ = [\"Model\", \"Agent\"]\n \n __title__ = \"mesa\"\n-__version__ = \"0.8.6\"\n+__version__ = \"0.8.7\"\n __license__ = \"Apache 2.0\"\n __copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n", "issue": "Push new Mesa release\nWee are overdue for an official release. Before I push one, does anyone have anything that really want to try to get in or should I just tag and release? \r\n\r\nDiscuss. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = \"mesa\"\n__version__ = \"0.8.6\"\n__license__ = \"Apache 2.0\"\n__copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n", "path": "mesa/__init__.py"}]} | 714 | 122 |
gh_patches_debug_27345 | rasdani/github-patches | git_diff | internetarchive__openlibrary-5001 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is there a way to limit the page-size of search API requests?
### Question
Is there a way to limit the page-size of search API requests?
The default Search-API page-size is 100 items: http://openlibrary.org/search.json?q=the+lord+of+the+rings
I would like to reduce the page-size (limit) for Search API calls, since the user can just 'page' through the results if he/she wants. Fetching more results also requires more processing on the client-side.
Side notes:
- The number is 20 for the search-inside API: http://openlibrary.org/search/inside.json?q=thanks%20for%20all%20the%20fish
- I think both default page-sizes should probably be the same (20 seems like a reasonable number to me).
- The Archive.org API has the "limit" parameter to do this.
Thanks!
</issue>
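A sketch of the clamping logic implied above; note that `web.input()` values arrive as strings, so the limit needs an integer conversion before `min()`. Here `safeint` is a simplified stand-in for the infogami helper of the same name, not the project's exact code:

```python
RESULTS_PER_PAGE = 20


def safeint(value, default):
    # Fall back to the default when the value is missing or not an integer.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default


def clamp_limit(raw_limit):
    # Never request more than RESULTS_PER_PAGE rows per API call.
    return min(safeint(raw_limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)


assert clamp_limit(None) == 20
assert clamp_limit("5") == 5
assert clamp_limit("100") == 20
```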
<code>
[start of openlibrary/plugins/inside/code.py]
1 from time import time
2
3 import json
4 import web
5
6 from infogami.utils import delegate
7 from infogami.utils.view import render_template
8
9 from openlibrary.core.fulltext import fulltext_search
10
11 RESULTS_PER_PAGE = 20
12
13
14 class search_inside(delegate.page):
15
16 path = '/search/inside'
17
18 def GET(self):
19 search_start = time() # should probably use a @timeit decorator
20 i = web.input(q='', page=1)
21 query = i.q
22 page = int(i.page)
23 results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE)
24 search_time = time() - search_start
25
26 return render_template('search/inside.tmpl', query, results, search_time,
27 page=page, results_per_page=RESULTS_PER_PAGE)
28 page.v2 = True # page is mobile-first
29 return page
30
31
32 class search_inside_json(delegate.page):
33 path = "/search/inside"
34 encoding = "json"
35
36 def GET(self):
37 i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)
38 limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE
39 query = i.q
40 page = int(i.page)
41 results = fulltext_search(query, page=page, limit=limit, js=True)
42 web.header('Content-Type', 'application/json')
43 return delegate.RawText(json.dumps(results, indent=4))
44
[end of openlibrary/plugins/inside/code.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/plugins/inside/code.py b/openlibrary/plugins/inside/code.py
--- a/openlibrary/plugins/inside/code.py
+++ b/openlibrary/plugins/inside/code.py
@@ -4,7 +4,7 @@
import web
from infogami.utils import delegate
-from infogami.utils.view import render_template
+from infogami.utils.view import render_template, safeint
from openlibrary.core.fulltext import fulltext_search
@@ -12,7 +12,6 @@
class search_inside(delegate.page):
-
path = '/search/inside'
def GET(self):
@@ -25,8 +24,6 @@
return render_template('search/inside.tmpl', query, results, search_time,
page=page, results_per_page=RESULTS_PER_PAGE)
- page.v2 = True # page is mobile-first
- return page
class search_inside_json(delegate.page):
@@ -35,7 +32,7 @@
def GET(self):
i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)
- limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE
+ limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)
query = i.q
page = int(i.page)
results = fulltext_search(query, page=page, limit=limit, js=True)
| {"golden_diff": "diff --git a/openlibrary/plugins/inside/code.py b/openlibrary/plugins/inside/code.py\n--- a/openlibrary/plugins/inside/code.py\n+++ b/openlibrary/plugins/inside/code.py\n@@ -4,7 +4,7 @@\n import web\n \n from infogami.utils import delegate\n-from infogami.utils.view import render_template\n+from infogami.utils.view import render_template, safeint\n \n from openlibrary.core.fulltext import fulltext_search\n \n@@ -12,7 +12,6 @@\n \n \n class search_inside(delegate.page):\n-\n path = '/search/inside'\n \n def GET(self):\n@@ -25,8 +24,6 @@\n \n return render_template('search/inside.tmpl', query, results, search_time,\n page=page, results_per_page=RESULTS_PER_PAGE)\n- page.v2 = True # page is mobile-first\n- return page\n \n \n class search_inside_json(delegate.page):\n@@ -35,7 +32,7 @@\n \n def GET(self):\n i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)\n- limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE\n+ limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=limit, js=True)\n", "issue": "Is there a way to limit the page-size of search API requests?\n### Question\r\nIs there a way to limit the page-size of search API requests?\r\n\r\nThe default Search-API page-size is 100 items: http://openlibrary.org/search.json?q=the+lord+of+the+rings\r\n\r\nI would like to reduce the page-size (limit) for Search API calls, since the user can just 'page' through the results if he/she wants. Fetching more results also requires more processing on the client-side.\r\n\r\nSide notes:\r\n- The number is 20 for the search-inside API: http://openlibrary.org/search/inside.json?q=thanks%20for%20all%20the%20fish\r\n- I think both default page-sizes should probably be the same (20 seems like a reasonable number to me).\r\n- The Archive.org API has the \"limit\" parameter to do this.\r\n\r\nThanks!\r\n\r\n\n", "before_files": [{"content": "from time import time\n\nimport json\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template\n\nfrom openlibrary.core.fulltext import fulltext_search\n\nRESULTS_PER_PAGE = 20\n\n\nclass search_inside(delegate.page):\n\n path = '/search/inside'\n\n def GET(self):\n search_start = time() # should probably use a @timeit decorator\n i = web.input(q='', page=1)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE)\n search_time = time() - search_start\n\n return render_template('search/inside.tmpl', query, results, search_time,\n page=page, results_per_page=RESULTS_PER_PAGE)\n page.v2 = True # page is mobile-first\n return page\n\n\nclass search_inside_json(delegate.page):\n path = \"/search/inside\"\n encoding = \"json\"\n\n def GET(self):\n i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)\n limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=limit, js=True)\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(results, indent=4))\n", "path": "openlibrary/plugins/inside/code.py"}]} | 1,131 | 308 |
gh_patches_debug_20619 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`merge_type` `types` type hint
https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/tools/merge_types.py#L9
The current `Tuple[Type]` produces:
```
*.py:15:5: error: Argument 2 to "merge_types" has incompatible type "Tuple[Type[QueryA], Type[QueryB], Type[QueryC]]"; expected "Tuple[Type[Any]]" [arg-type]
```
According to [mypy](https://mypy.readthedocs.io/en/stable/kinds_of_types.html#tuple-types), we should either change it to `Tuple[Type, ...]` or follow mypy's suggestion and go with a generic `Sequence`.
</issue>
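A minimal reproduction of the annotation mismatch, independent of Strawberry's code: `Tuple[Type]` describes a 1-tuple holding exactly one type object, whereas `Tuple[Type, ...]` (or a generic `Sequence[Type]`) accepts any number of merged types:

```python
from typing import Tuple


def takes_fixed(types: Tuple[type]) -> None: ...


def takes_variadic(types: Tuple[type, ...]) -> None: ...


class QueryA: ...
class QueryB: ...
class QueryC: ...


takes_fixed((QueryA, QueryB, QueryC))     # mypy: arg-type error, 3-tuple vs 1-tuple
takes_variadic((QueryA, QueryB, QueryC))  # accepted by mypy
```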
<code>
[start of strawberry/tools/merge_types.py]
1 import warnings
2 from collections import Counter
3 from itertools import chain
4 from typing import Tuple, Type
5
6 import strawberry
7
8
9 def merge_types(name: str, types: Tuple[Type]) -> Type:
10 """Merge multiple Strawberry types into one
11
12 For example, given two queries `A` and `B`, one can merge them into a
13 super type as follows:
14
15 merge_types("SuperQuery", (B, A))
16
17 This is essentially the same as:
18
19 class SuperQuery(B, A):
20 ...
21 """
22
23 if not types:
24 raise ValueError("Can't merge types if none are supplied")
25
26 fields = chain(*(t._type_definition.fields for t in types))
27 counter = Counter(f.name for f in fields)
28 dupes = [f for f, c in counter.most_common() if c > 1]
29 if dupes:
30 warnings.warn("{} has overridden fields: {}".format(name, ", ".join(dupes)))
31
32 return strawberry.type(type(name, types, {}))
33
[end of strawberry/tools/merge_types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/tools/merge_types.py b/strawberry/tools/merge_types.py
--- a/strawberry/tools/merge_types.py
+++ b/strawberry/tools/merge_types.py
@@ -1,12 +1,12 @@
import warnings
from collections import Counter
from itertools import chain
-from typing import Tuple, Type
+from typing import Tuple
import strawberry
-def merge_types(name: str, types: Tuple[Type]) -> Type:
+def merge_types(name: str, types: Tuple[type, ...]) -> type:
"""Merge multiple Strawberry types into one
For example, given two queries `A` and `B`, one can merge them into a
@@ -23,7 +23,9 @@
if not types:
raise ValueError("Can't merge types if none are supplied")
- fields = chain(*(t._type_definition.fields for t in types))
+ fields = chain(
+ *(t._type_definition.fields for t in types) # type: ignore[attr-defined]
+ )
counter = Counter(f.name for f in fields)
dupes = [f for f, c in counter.most_common() if c > 1]
if dupes:
| {"golden_diff": "diff --git a/strawberry/tools/merge_types.py b/strawberry/tools/merge_types.py\n--- a/strawberry/tools/merge_types.py\n+++ b/strawberry/tools/merge_types.py\n@@ -1,12 +1,12 @@\n import warnings\n from collections import Counter\n from itertools import chain\n-from typing import Tuple, Type\n+from typing import Tuple\n \n import strawberry\n \n \n-def merge_types(name: str, types: Tuple[Type]) -> Type:\n+def merge_types(name: str, types: Tuple[type, ...]) -> type:\n \"\"\"Merge multiple Strawberry types into one\n \n For example, given two queries `A` and `B`, one can merge them into a\n@@ -23,7 +23,9 @@\n if not types:\n raise ValueError(\"Can't merge types if none are supplied\")\n \n- fields = chain(*(t._type_definition.fields for t in types))\n+ fields = chain(\n+ *(t._type_definition.fields for t in types) # type: ignore[attr-defined]\n+ )\n counter = Counter(f.name for f in fields)\n dupes = [f for f, c in counter.most_common() if c > 1]\n if dupes:\n", "issue": "`merge_type` `types` type hint\nhttps://github.com/strawberry-graphql/strawberry/blob/main/strawberry/tools/merge_types.py#L9\r\n\r\nThe current `Tuple[Type]` produces:\r\n```\r\n*.py:15:5: error: Argument 2 to \"merge_types\" has incompatible type \"Tuple[Type[QueryA], Type[QueryB], Type[QueryC]]\"; expected \"Tuple[Type[Any]]\" [arg-type]\r\n```\r\n\r\nAccording to [mypy](https://mypy.readthedocs.io/en/stable/kinds_of_types.html#tuple-types), we should either change it to `Tuple[Type, ...]` or follow mypy's suggestion and go with a generic `Sequence`.\r\n\r\n\n", "before_files": [{"content": "import warnings\nfrom collections import Counter\nfrom itertools import chain\nfrom typing import Tuple, Type\n\nimport strawberry\n\n\ndef merge_types(name: str, types: Tuple[Type]) -> Type:\n \"\"\"Merge multiple Strawberry types into one\n\n For example, given two queries `A` and `B`, one can merge them into a\n super type as follows:\n\n merge_types(\"SuperQuery\", (B, A))\n\n This is essentially the same as:\n\n class SuperQuery(B, A):\n ...\n \"\"\"\n\n if not types:\n raise ValueError(\"Can't merge types if none are supplied\")\n\n fields = chain(*(t._type_definition.fields for t in types))\n counter = Counter(f.name for f in fields)\n dupes = [f for f, c in counter.most_common() if c > 1]\n if dupes:\n warnings.warn(\"{} has overridden fields: {}\".format(name, \", \".join(dupes)))\n\n return strawberry.type(type(name, types, {}))\n", "path": "strawberry/tools/merge_types.py"}]} | 976 | 270 |
gh_patches_debug_18567 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-945 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update and unify a number of metrics in `torchmetrics` docs
## 📚 Documentation
Before the next feature release, it'd be nice to update the number of implemented metrics and unify this number over all occurrences within the docs/pages.
**Additional context:** It looks like we already have almost 80 metrics, so it'd be a pity to underestimate the count before another feature release O:]
```bash
$ grep -w docs/source/references/functional.rst -e "func" | wc -l
78
```
</issue>
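If a guard against this drifting again were wanted, a rough consistency check along these lines could run in CI. The paths and regexes below are assumptions for illustration, not an existing torchmetrics test:

```python
import re
from pathlib import Path


def documented_metric_count(rst_path="docs/source/references/functional.rst"):
    # Count documented functional metrics the same way as the grep above.
    text = Path(rst_path).read_text()
    return len(re.findall(r"\bfunc\b", text))


def claimed_count(about_path="torchmetrics/__about__.py"):
    # Pull the "around NN+ metrics" figure quoted in the long description.
    match = re.search(r"around (\d+)\+ metrics", Path(about_path).read_text())
    return int(match.group(1)) if match else 0


if __name__ == "__main__":
    assert claimed_count() <= documented_metric_count(), "docs understate the metric count"
```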
<code>
[start of torchmetrics/__about__.py]
1 __version__ = "0.8.0dev"
2 __author__ = "PyTorchLightning et al."
3 __author_email__ = "[email protected]"
4 __license__ = "Apache-2.0"
5 __copyright__ = f"Copyright (c) 2020-2022, {__author__}."
6 __homepage__ = "https://github.com/PyTorchLightning/metrics"
7 __docs__ = "PyTorch native Metrics"
8 __docs_url__ = "https://torchmetrics.readthedocs.io/en/stable/"
9 __long_doc__ = """
10 Torchmetrics is a metrics API created for easy metric development and usage in both PyTorch and
11 [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
12 Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics
13 implemented without having to install Pytorch Lightning (even though we would love for you to try it out).
14 We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within
15 already covered domains (classification, regression ect.) but also new domains (object detection ect.).
16 We make sure that all our metrics are rigorously tested such that you can trust them.
17 """
18
19 __all__ = [
20 "__author__",
21 "__author_email__",
22 "__copyright__",
23 "__docs__",
24 "__homepage__",
25 "__license__",
26 "__version__",
27 ]
28
[end of torchmetrics/__about__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchmetrics/__about__.py b/torchmetrics/__about__.py
--- a/torchmetrics/__about__.py
+++ b/torchmetrics/__about__.py
@@ -11,7 +11,7 @@
[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of
Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics
implemented without having to install Pytorch Lightning (even though we would love for you to try it out).
-We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within
+We currently have around 80+ metrics implemented and we continuously are adding more metrics, both within
already covered domains (classification, regression ect.) but also new domains (object detection ect.).
We make sure that all our metrics are rigorously tested such that you can trust them.
"""
| {"golden_diff": "diff --git a/torchmetrics/__about__.py b/torchmetrics/__about__.py\n--- a/torchmetrics/__about__.py\n+++ b/torchmetrics/__about__.py\n@@ -11,7 +11,7 @@\n [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of\n Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics\n implemented without having to install Pytorch Lightning (even though we would love for you to try it out).\n-We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within\n+We currently have around 80+ metrics implemented and we continuously are adding more metrics, both within\n already covered domains (classification, regression ect.) but also new domains (object detection ect.).\n We make sure that all our metrics are rigorously tested such that you can trust them.\n \"\"\"\n", "issue": "Update and unify a number of metrics in `torchmetrics` docs\n## \ud83d\udcda Documentation\r\n\r\nBefore the next feature release, it'd be nice to update the number of implemented metrics and unify this number over all occurrences within the docs/pages.\r\n\r\n**Additional context:** It looks like we've already had almost 80 metrics, so it'd be pity to underestimate these before another feature release O:]\r\n\r\n```bash\r\n$ grep -w docs/source/references/functional.rst -e \"func\" | wc -l\r\n 78\r\n```\r\n\n", "before_files": [{"content": "__version__ = \"0.8.0dev\"\n__author__ = \"PyTorchLightning et al.\"\n__author_email__ = \"[email protected]\"\n__license__ = \"Apache-2.0\"\n__copyright__ = f\"Copyright (c) 2020-2022, {__author__}.\"\n__homepage__ = \"https://github.com/PyTorchLightning/metrics\"\n__docs__ = \"PyTorch native Metrics\"\n__docs_url__ = \"https://torchmetrics.readthedocs.io/en/stable/\"\n__long_doc__ = \"\"\"\nTorchmetrics is a metrics API created for easy metric development and usage in both PyTorch and\n[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of\nPytorch Lightning, but got split off so users could take advantage of the large collection of metrics\nimplemented without having to install Pytorch Lightning (even though we would love for you to try it out).\nWe currently have around 60+ metrics implemented and we continuously are adding more metrics, both within\nalready covered domains (classification, regression ect.) but also new domains (object detection ect.).\nWe make sure that all our metrics are rigorously tested such that you can trust them.\n\"\"\"\n\n__all__ = [\n \"__author__\",\n \"__author_email__\",\n \"__copyright__\",\n \"__docs__\",\n \"__homepage__\",\n \"__license__\",\n \"__version__\",\n]\n", "path": "torchmetrics/__about__.py"}]} | 1,020 | 204 |
gh_patches_debug_454 | rasdani/github-patches | git_diff | Textualize__textual-2755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A lone `Static` results in a `TooManyMatches` error when using `query_one`
I've not dived into this beyond knocking up this example to isolate what I saw (about to head out of the door but wanted to record this as a reminder). With 0.27.0 (perhaps before too, just noting the version here for the record), this code:
```python
from textual.app import App, ComposeResult
from textual.widgets import Static
class OneStatic( App[ None ] ):
def compose( self ) -> ComposeResult:
yield Static()
def on_mount( self ) -> None:
self.query_one( Static ).update( "Hello, World!" )
if __name__ == "__main__":
OneStatic().run()
```
results in a `TooManyMatches` error being raised from the `query_one` call. From very early testing, this only seems to be the case with `Static` (at least, I tested with `Label` and `Button`, and they're fine).
I think most people would rightly find this surprising.
</issue>
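The reference patch below changes the `Tooltip` widget itself; purely as a user-side workaround sketch, querying by a unique id avoids colliding with any other `Static` subclass the screen happens to mount:

```python
from textual.app import App, ComposeResult
from textual.widgets import Static


class OneStatic(App[None]):
    def compose(self) -> ComposeResult:
        # Give the widget an id so it can be selected unambiguously.
        yield Static(id="greeting")

    def on_mount(self) -> None:
        self.query_one("#greeting", Static).update("Hello, World!")


if __name__ == "__main__":
    OneStatic().run()
```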
<code>
[start of src/textual/widgets/_tooltip.py]
1 from __future__ import annotations
2
3 from textual.widgets import Static
4
5
6 class Tooltip(Static):
7 DEFAULT_CSS = """
8 Tooltip {
9 layer: _tooltips;
10 margin: 1 2;
11 padding: 1 2;
12 background: $panel;
13 width: auto;
14 height: auto;
15 constrain: inflect;
16 max-width: 40;
17 display: none;
18 }
19 """
20
[end of src/textual/widgets/_tooltip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/widgets/_tooltip.py b/src/textual/widgets/_tooltip.py
--- a/src/textual/widgets/_tooltip.py
+++ b/src/textual/widgets/_tooltip.py
@@ -3,7 +3,7 @@
from textual.widgets import Static
-class Tooltip(Static):
+class Tooltip(Static, inherit_css=False):
DEFAULT_CSS = """
Tooltip {
layer: _tooltips;
| {"golden_diff": "diff --git a/src/textual/widgets/_tooltip.py b/src/textual/widgets/_tooltip.py\n--- a/src/textual/widgets/_tooltip.py\n+++ b/src/textual/widgets/_tooltip.py\n@@ -3,7 +3,7 @@\n from textual.widgets import Static\n \n \n-class Tooltip(Static):\n+class Tooltip(Static, inherit_css=False):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n", "issue": "A lone `Static` results in a `TooManyMatches` error when using `query_one`\nI've not dived into this beyond knocking up this example to isolate what I saw (about to head out of the door but wanted to record this as a reminder). With 0.27.0 (perhaps before too, just noting the version here for the record), this code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Static\r\n\r\nclass OneStatic( App[ None ] ):\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Static()\r\n\r\n def on_mount( self ) -> None:\r\n self.query_one( Static ).update( \"Hello, World!\" )\r\n\r\nif __name__ == \"__main__\":\r\n OneStatic().run()\r\n```\r\n\r\nresults in a `TooManyMatches` error being raised from the `query_one`. With very early testing this only seems to be the case with `Static` (at least, I tested with `Label` and `Button` and they're fine).\r\n\r\nI think most people would rightly find this surprising.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textual.widgets import Static\n\n\nclass Tooltip(Static):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n margin: 1 2;\n padding: 1 2;\n background: $panel;\n width: auto;\n height: auto;\n constrain: inflect;\n max-width: 40;\n display: none;\n }\n \"\"\"\n", "path": "src/textual/widgets/_tooltip.py"}]} | 891 | 89 |
gh_patches_debug_22794 | rasdani/github-patches | git_diff | ultrabug__py3status-2007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clock terminated with Exception
With the latest Manjaro Testing Update, I received version 3.32 with Python 3.9.1.
All modules still work except the clock module which is terminated. The journal simply says
```Exception in `i3pystatus clock` post_config_hook().```
The config didn't change and works with 3.31:
```
clock {
format = "{Local}"
format_time = "{icon} %a, %d.%m.%Y %H:%M"
}
```
Downgrading to 3.31 works. What other information do you need?
</issue>
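A plausible reading of the crash, hedged because the full traceback isn't shown: the `storage` config value is a plain `str`, and `str` has no `.expanduser()`, so path handling in `Storage.init()` fails before modules finish their `post_config_hook()`. A str-safe sketch consistent with the patched lines:

```python
import os
from pathlib import Path

storage_config = "~/.config/py3status/cache.data"  # example value; the option arrives as a str

# str has no .expanduser(), so this is the kind of call that raises AttributeError:
#     os.path.expandvars(storage_config.expanduser())
# A version that works for plain strings:
storage_file = os.path.expandvars(os.path.expanduser(storage_config))

# Joining directory and file without assuming either part is already a Path:
storage_dir = os.environ.get("XDG_CACHE_HOME") or str(Path("~/.cache").expanduser())
storage_path = Path(storage_dir, "py3status_cache.data")
print(storage_file, storage_path)
```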
<code>
[start of py3status/storage.py]
1 import os
2 import time
3
4 from pathlib import Path
5 from pickle import dump, load
6 from tempfile import NamedTemporaryFile
7
8
9 class Storage:
10
11 data = {}
12 initialized = False
13
14 def init(self, py3_wrapper):
15 self.py3_wrapper = py3_wrapper
16 self.config = py3_wrapper.config
17 py3_config = self.config.get("py3_config", {})
18
19 # check for legacy storage cache
20 legacy_storage_path = self.get_legacy_storage_path()
21
22 # cutting edge storage cache
23 storage_config = py3_config.get("py3status", {}).get("storage")
24 if storage_config:
25 storage_file = os.path.expandvars(storage_config.expanduser())
26 if "/" in storage_file:
27 storage_dir = None
28 else:
29 storage_dir = os.environ.get("XDG_CACHE_HOME")
30 else:
31 storage_dir = os.environ.get("XDG_CACHE_HOME")
32 storage_file = Path("py3status_cache.data")
33
34 if not storage_dir:
35 storage_dir = Path("~/.cache").expanduser()
36 self.storage_path = storage_dir / storage_file
37
38 # move legacy storage cache to new desired / default location
39 if legacy_storage_path:
40 self.py3_wrapper.log(
41 "moving legacy storage_path {} to {}".format(
42 legacy_storage_path, self.storage_path
43 )
44 )
45 legacy_storage_path.rename(self.storage_path)
46
47 try:
48 with self.storage_path.open("rb") as f:
49 self.data = load(f, encoding="bytes")
50 except OSError:
51 pass
52
53 self.py3_wrapper.log(f"storage_path: {self.storage_path}")
54 if self.data:
55 self.py3_wrapper.log(f"storage_data: {self.data}")
56 self.initialized = True
57
58 def get_legacy_storage_path(self):
59 """
60 Detect and return existing legacy storage path.
61 """
62 config_dir = Path(
63 self.py3_wrapper.config.get("i3status_config_path", "/tmp")
64 ).parent
65 storage_path = config_dir / "py3status.data"
66 if storage_path.exists():
67 return storage_path
68 else:
69 return None
70
71 def save(self):
72 """
73 Save our data to disk. We want to always have a valid file.
74 """
75 with NamedTemporaryFile(dir=self.storage_path.parent, delete=False) as f:
76 # we use protocol=2 for python 2/3 compatibility
77 dump(self.data, f, protocol=2)
78 f.flush()
79 os.fsync(f.fileno())
80 tmppath = Path(f.name)
81 tmppath.rename(self.storage_path)
82
83 def storage_set(self, module_name, key, value):
84 if key.startswith("_"):
85 raise ValueError('cannot set keys starting with an underscore "_"')
86
87 if self.data.get(module_name, {}).get(key) == value:
88 return
89
90 if module_name not in self.data:
91 self.data[module_name] = {}
92 self.data[module_name][key] = value
93 ts = time.time()
94 if "_ctime" not in self.data[module_name]:
95 self.data[module_name]["_ctime"] = ts
96 self.data[module_name]["_mtime"] = ts
97 self.save()
98
99 def storage_get(self, module_name, key):
100 return self.data.get(module_name, {}).get(key, None)
101
102 def storage_del(self, module_name, key=None):
103 if module_name in self.data and key in self.data[module_name]:
104 del self.data[module_name][key]
105 self.save()
106
107 def storage_keys(self, module_name):
108 return list(self.data.get(module_name, {}))
109
[end of py3status/storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/storage.py b/py3status/storage.py
--- a/py3status/storage.py
+++ b/py3status/storage.py
@@ -22,18 +22,18 @@
# cutting edge storage cache
storage_config = py3_config.get("py3status", {}).get("storage")
if storage_config:
- storage_file = os.path.expandvars(storage_config.expanduser())
+ storage_file = os.path.expandvars(os.path.expanduser(storage_config))
if "/" in storage_file:
storage_dir = None
else:
storage_dir = os.environ.get("XDG_CACHE_HOME")
else:
storage_dir = os.environ.get("XDG_CACHE_HOME")
- storage_file = Path("py3status_cache.data")
+ storage_file = "py3status_cache.data"
if not storage_dir:
storage_dir = Path("~/.cache").expanduser()
- self.storage_path = storage_dir / storage_file
+ self.storage_path = Path(storage_dir, storage_file)
# move legacy storage cache to new desired / default location
if legacy_storage_path:
| {"golden_diff": "diff --git a/py3status/storage.py b/py3status/storage.py\n--- a/py3status/storage.py\n+++ b/py3status/storage.py\n@@ -22,18 +22,18 @@\n # cutting edge storage cache\n storage_config = py3_config.get(\"py3status\", {}).get(\"storage\")\n if storage_config:\n- storage_file = os.path.expandvars(storage_config.expanduser())\n+ storage_file = os.path.expandvars(os.path.expanduser(storage_config))\n if \"/\" in storage_file:\n storage_dir = None\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n- storage_file = Path(\"py3status_cache.data\")\n+ storage_file = \"py3status_cache.data\"\n \n if not storage_dir:\n storage_dir = Path(\"~/.cache\").expanduser()\n- self.storage_path = storage_dir / storage_file\n+ self.storage_path = Path(storage_dir, storage_file)\n \n # move legacy storage cache to new desired / default location\n if legacy_storage_path:\n", "issue": "Clock terminated with Exception\nWith the latest Manjaro Testing Update, I received version 3.32 with Python 3.9.1.\r\n\r\nAll modules still work except the clock module which is terminated. The journal simply says\r\n```Exception in `i3pystatus clock` post_config_hook().```\r\n\r\nThe config didn't change and works with 3.31:\r\n```\r\nclock {\r\n format = \"{Local}\"\r\n format_time = \"{icon} %a, %d.%m.%Y %H:%M\"\r\n}\r\n```\r\n\r\nDowngrading to 3.31 works. What else information do you need?\n", "before_files": [{"content": "import os\nimport time\n\nfrom pathlib import Path\nfrom pickle import dump, load\nfrom tempfile import NamedTemporaryFile\n\n\nclass Storage:\n\n data = {}\n initialized = False\n\n def init(self, py3_wrapper):\n self.py3_wrapper = py3_wrapper\n self.config = py3_wrapper.config\n py3_config = self.config.get(\"py3_config\", {})\n\n # check for legacy storage cache\n legacy_storage_path = self.get_legacy_storage_path()\n\n # cutting edge storage cache\n storage_config = py3_config.get(\"py3status\", {}).get(\"storage\")\n if storage_config:\n storage_file = os.path.expandvars(storage_config.expanduser())\n if \"/\" in storage_file:\n storage_dir = None\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n storage_file = Path(\"py3status_cache.data\")\n\n if not storage_dir:\n storage_dir = Path(\"~/.cache\").expanduser()\n self.storage_path = storage_dir / storage_file\n\n # move legacy storage cache to new desired / default location\n if legacy_storage_path:\n self.py3_wrapper.log(\n \"moving legacy storage_path {} to {}\".format(\n legacy_storage_path, self.storage_path\n )\n )\n legacy_storage_path.rename(self.storage_path)\n\n try:\n with self.storage_path.open(\"rb\") as f:\n self.data = load(f, encoding=\"bytes\")\n except OSError:\n pass\n\n self.py3_wrapper.log(f\"storage_path: {self.storage_path}\")\n if self.data:\n self.py3_wrapper.log(f\"storage_data: {self.data}\")\n self.initialized = True\n\n def get_legacy_storage_path(self):\n \"\"\"\n Detect and return existing legacy storage path.\n \"\"\"\n config_dir = Path(\n self.py3_wrapper.config.get(\"i3status_config_path\", \"/tmp\")\n ).parent\n storage_path = config_dir / \"py3status.data\"\n if storage_path.exists():\n return storage_path\n else:\n return None\n\n def save(self):\n \"\"\"\n Save our data to disk. 
We want to always have a valid file.\n \"\"\"\n with NamedTemporaryFile(dir=self.storage_path.parent, delete=False) as f:\n # we use protocol=2 for python 2/3 compatibility\n dump(self.data, f, protocol=2)\n f.flush()\n os.fsync(f.fileno())\n tmppath = Path(f.name)\n tmppath.rename(self.storage_path)\n\n def storage_set(self, module_name, key, value):\n if key.startswith(\"_\"):\n raise ValueError('cannot set keys starting with an underscore \"_\"')\n\n if self.data.get(module_name, {}).get(key) == value:\n return\n\n if module_name not in self.data:\n self.data[module_name] = {}\n self.data[module_name][key] = value\n ts = time.time()\n if \"_ctime\" not in self.data[module_name]:\n self.data[module_name][\"_ctime\"] = ts\n self.data[module_name][\"_mtime\"] = ts\n self.save()\n\n def storage_get(self, module_name, key):\n return self.data.get(module_name, {}).get(key, None)\n\n def storage_del(self, module_name, key=None):\n if module_name in self.data and key in self.data[module_name]:\n del self.data[module_name][key]\n self.save()\n\n def storage_keys(self, module_name):\n return list(self.data.get(module_name, {}))\n", "path": "py3status/storage.py"}]} | 1,660 | 241 |
gh_patches_debug_1684 | rasdani/github-patches | git_diff | geopandas__geopandas-2398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop Python 3.7
We should consider dropping support for Python 3.7. We are roughly following the numpy model (#1457) and numpy itself is 3.8+ now. The same applies to pyproj, which requires 3.8 (and causes some macOS CI failures because of some conda issues). 
I forgot about Python versions when doing #2358 and bumped only packages.
@jorisvandenbossche if you're fine with that, I'll update CI matrix and related things.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env/python
2 """Installation script
3
4 """
5
6 import os
7
8 try:
9 from setuptools import setup
10 except ImportError:
11 from distutils.core import setup
12
13 import versioneer
14
15 LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
16 `pandas`_ objects.
17
18 The goal of GeoPandas is to make working with geospatial data in
19 python easier. It combines the capabilities of `pandas`_ and `shapely`_,
20 providing geospatial operations in pandas and a high-level interface
21 to multiple geometries to shapely. GeoPandas enables you to easily do
22 operations in python that would otherwise require a spatial database
23 such as PostGIS.
24
25 .. _pandas: http://pandas.pydata.org
26 .. _shapely: http://shapely.readthedocs.io/en/latest/
27 """
28
29 if os.environ.get("READTHEDOCS", False) == "True":
30 INSTALL_REQUIRES = []
31 else:
32 INSTALL_REQUIRES = [
33 "pandas >= 1.0.0",
34 "shapely >= 1.7",
35 "fiona >= 1.8",
36 "pyproj >= 2.6.1.post1",
37 "packaging",
38 ]
39
40 # get all data dirs in the datasets module
41 data_files = []
42
43 for item in os.listdir("geopandas/datasets"):
44 if not item.startswith("__"):
45 if os.path.isdir(os.path.join("geopandas/datasets/", item)):
46 data_files.append(os.path.join("datasets", item, "*"))
47 elif item.endswith(".zip"):
48 data_files.append(os.path.join("datasets", item))
49
50 data_files.append("tests/data/*")
51
52
53 setup(
54 name="geopandas",
55 version=versioneer.get_version(),
56 description="Geographic pandas extensions",
57 license="BSD",
58 author="GeoPandas contributors",
59 author_email="[email protected]",
60 url="http://geopandas.org",
61 project_urls={
62 "Source": "https://github.com/geopandas/geopandas",
63 },
64 long_description=LONG_DESCRIPTION,
65 packages=[
66 "geopandas",
67 "geopandas.io",
68 "geopandas.tools",
69 "geopandas.datasets",
70 "geopandas.tests",
71 "geopandas.tools.tests",
72 ],
73 package_data={"geopandas": data_files},
74 python_requires=">=3.7",
75 install_requires=INSTALL_REQUIRES,
76 cmdclass=versioneer.get_cmdclass(),
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,7 +71,7 @@
"geopandas.tools.tests",
],
package_data={"geopandas": data_files},
- python_requires=">=3.7",
+ python_requires=">=3.8",
install_requires=INSTALL_REQUIRES,
cmdclass=versioneer.get_cmdclass(),
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,7 @@\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n- python_requires=\">=3.7\",\n+ python_requires=\">=3.8\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n )\n", "issue": "Drop Python 3.7\nWe should consider dropping support for Python 3.7. We are roughly following numpy model (#1457) and numpy itself is 3.8+ now. Same applies to pyproj, which requires 3.8 (and causes some macOS CI failures because of some conda issues). \r\n\r\nI forgot about Python versions when doing #2358 and bumped only packages.\r\n\r\n@jorisvandenbossche if you're fine with that, I'll update CI matrix and related things.\n", "before_files": [{"content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://shapely.readthedocs.io/en/latest/\n\"\"\"\n\nif os.environ.get(\"READTHEDOCS\", False) == \"True\":\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n \"pandas >= 1.0.0\",\n \"shapely >= 1.7\",\n \"fiona >= 1.8\",\n \"pyproj >= 2.6.1.post1\",\n \"packaging\",\n ]\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith(\"__\"):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, \"*\"))\n elif item.endswith(\".zip\"):\n data_files.append(os.path.join(\"datasets\", item))\n\ndata_files.append(\"tests/data/*\")\n\n\nsetup(\n name=\"geopandas\",\n version=versioneer.get_version(),\n description=\"Geographic pandas extensions\",\n license=\"BSD\",\n author=\"GeoPandas contributors\",\n author_email=\"[email protected]\",\n url=\"http://geopandas.org\",\n project_urls={\n \"Source\": \"https://github.com/geopandas/geopandas\",\n },\n long_description=LONG_DESCRIPTION,\n packages=[\n \"geopandas\",\n \"geopandas.io\",\n \"geopandas.tools\",\n \"geopandas.datasets\",\n \"geopandas.tests\",\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n python_requires=\">=3.7\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n)\n", "path": "setup.py"}]} | 1,330 | 93 |
gh_patches_debug_5914 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
profiling/line2def does not handle empty filenames
### Which version of dd-trace-py are you using?
We're not running dd-trace - we're running the profiler by importing `ddtrace.profiling.auto`.
### Which version of the libraries are you using?
ddtrace: 0.40.0
datadog: 0.38.0
You can copy/paste the output of `pip freeze` here.
### How can we reproduce your problem?
I'm unsure - this appears to happen sporadically.
### What is the result that you get?
First, ddtrace runs into a KeyError in `_to_Location`, line 90:
```
def _to_Location(self, filename, lineno, funcname=None):
try:
return self._locations[(filename, lineno, funcname)]
```
`filename` is '', `lineno` is 1, `funcname` is None.
Next, in `filename_and_lineno_to_def`, line 63, we get an IndexError:
```
def filename_and_lineno_to_def(filename, lineno):
if filename[0] == "<" and filename[-1] == ">":
return default_def(filename, lineno)
```
Since the filename is an empty string, this complains.
### What is the result that you expected?
Not an error.
If you need more information, please let me know!
</issue>
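A minimal sketch of the guard this implies, mirroring the shape of the eventual fix: treat an empty filename like the synthetic `"<...>"` filenames and fall back to a `filename:lineno` label instead of indexing into the empty string:

```python
def default_def(filename, lineno):
    return str(filename) + ":" + str(lineno)


def filename_and_lineno_to_def(filename, lineno):
    # Empty or synthetic filenames cannot be parsed, so use the fallback label.
    if not filename or (filename[0] == "<" and filename[-1] == ">"):
        return default_def(filename, lineno)
    # ... normal AST-based lookup would go here ...
    return default_def(filename, lineno)


assert filename_and_lineno_to_def("", 1) == ":1"
assert filename_and_lineno_to_def("<string>", 7) == "<string>:7"
```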
<code>
[start of ddtrace/profiling/_line2def.py]
1 # -*- encoding: utf-8 -*-
2 import ast
3
4 import intervaltree
5
6
7 try:
8 from functools import lru_cache
9 except ImportError:
10 # This is for Python 2 but Python 2 does not use this module.
11 # It's just useful for unit tests.
12 def lru_cache(maxsize):
13 def w(f):
14 return f
15
16 return w
17
18
19 try:
20 # Python 2 does not have this.
21 from tokenize import open as source_open
22 except ImportError:
23 source_open = open
24
25 from ddtrace.vendor import six
26
27
28 def _compute_interval(node):
29 min_lineno = node.lineno
30 max_lineno = node.lineno
31 for node in ast.walk(node):
32 if hasattr(node, "lineno"):
33 min_lineno = min(min_lineno, node.lineno)
34 max_lineno = max(max_lineno, node.lineno)
35 return (min_lineno, max_lineno + 1)
36
37
38 if six.PY3:
39 _DEFS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)
40 else:
41 _DEFS = (ast.FunctionDef, ast.ClassDef)
42
43
44 @lru_cache(maxsize=256)
45 def file_to_tree(filename):
46 # Use tokenize.open to detect encoding
47 with source_open(filename) as f:
48 parsed = ast.parse(f.read(), filename=filename)
49 tree = intervaltree.IntervalTree()
50 for node in ast.walk(parsed):
51 if isinstance(node, _DEFS):
52 start, end = _compute_interval(node)
53 tree[start:end] = node
54 return tree
55
56
57 def default_def(filename, lineno):
58 return filename + ":" + str(lineno)
59
60
61 @lru_cache(maxsize=8192)
62 def filename_and_lineno_to_def(filename, lineno):
63 if filename[0] == "<" and filename[-1] == ">":
64 return default_def(filename, lineno)
65
66 try:
67 matches = file_to_tree(filename)[lineno]
68 except (IOError, OSError, SyntaxError):
69 return default_def(filename, lineno)
70 if matches:
71 return min(matches, key=lambda i: i.length()).data.name
72
73 return default_def(filename, lineno)
74
[end of ddtrace/profiling/_line2def.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/profiling/_line2def.py b/ddtrace/profiling/_line2def.py
--- a/ddtrace/profiling/_line2def.py
+++ b/ddtrace/profiling/_line2def.py
@@ -55,12 +55,12 @@
def default_def(filename, lineno):
- return filename + ":" + str(lineno)
+ return str(filename) + ":" + str(lineno)
@lru_cache(maxsize=8192)
def filename_and_lineno_to_def(filename, lineno):
- if filename[0] == "<" and filename[-1] == ">":
+ if not filename or (filename[0] == "<" and filename[-1] == ">"):
return default_def(filename, lineno)
try:
| {"golden_diff": "diff --git a/ddtrace/profiling/_line2def.py b/ddtrace/profiling/_line2def.py\n--- a/ddtrace/profiling/_line2def.py\n+++ b/ddtrace/profiling/_line2def.py\n@@ -55,12 +55,12 @@\n \n \n def default_def(filename, lineno):\n- return filename + \":\" + str(lineno)\n+ return str(filename) + \":\" + str(lineno)\n \n \n @lru_cache(maxsize=8192)\n def filename_and_lineno_to_def(filename, lineno):\n- if filename[0] == \"<\" and filename[-1] == \">\":\n+ if not filename or (filename[0] == \"<\" and filename[-1] == \">\"):\n return default_def(filename, lineno)\n \n try:\n", "issue": "profiling/line2def does not handle empty filenames\n### Which version of dd-trace-py are you using?\r\nWe're not running dd-trace - we're running the profiler by importing `ddtrace.profiling.auto`.\r\n\r\n### Which version of the libraries are you using?\r\nddtrace: 0.40.0\r\ndatadog: 0.38.0\r\n\r\nYou can copy/paste the output of `pip freeze` here.\r\n\r\n### How can we reproduce your problem?\r\nI'm unsure - this appears to happen sporadically.\r\n\r\n### What is the result that you get?\r\nFirst, ddtrace runs into a KeyError in `_to_Location`, line 90:\r\n```\r\ndef _to_Location(self, filename, lineno, funcname=None):\r\n try:\r\n return self._locations[(filename, lineno, funcname)]\r\n```\r\n`filename` is '', `lineno` is 1, `funcname` is None.\r\n\r\nNext, in `filename_and_lineno_to_def`, line 63, we get an IndexError:\r\n```\r\ndef filename_and_lineno_to_def(filename, lineno):\r\n if filename[0] == \"<\" and filename[-1] == \">\":\r\n return default_def(filename, lineno)\r\n```\r\nSince the filename is an empty string, this complains.\r\n\r\n\r\n\r\n### What is the result that you expected?\r\nNot an error.\r\n\r\nIf you need more information, please let me know!\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport ast\n\nimport intervaltree\n\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n # This is for Python\u00a02 but Python\u00a02 does not use this module.\n # It's just useful for unit tests.\n def lru_cache(maxsize):\n def w(f):\n return f\n\n return w\n\n\ntry:\n # Python\u00a02 does not have this.\n from tokenize import open as source_open\nexcept ImportError:\n source_open = open\n\nfrom ddtrace.vendor import six\n\n\ndef _compute_interval(node):\n min_lineno = node.lineno\n max_lineno = node.lineno\n for node in ast.walk(node):\n if hasattr(node, \"lineno\"):\n min_lineno = min(min_lineno, node.lineno)\n max_lineno = max(max_lineno, node.lineno)\n return (min_lineno, max_lineno + 1)\n\n\nif six.PY3:\n _DEFS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)\nelse:\n _DEFS = (ast.FunctionDef, ast.ClassDef)\n\n\n@lru_cache(maxsize=256)\ndef file_to_tree(filename):\n # Use tokenize.open to detect encoding\n with source_open(filename) as f:\n parsed = ast.parse(f.read(), filename=filename)\n tree = intervaltree.IntervalTree()\n for node in ast.walk(parsed):\n if isinstance(node, _DEFS):\n start, end = _compute_interval(node)\n tree[start:end] = node\n return tree\n\n\ndef default_def(filename, lineno):\n return filename + \":\" + str(lineno)\n\n\n@lru_cache(maxsize=8192)\ndef filename_and_lineno_to_def(filename, lineno):\n if filename[0] == \"<\" and filename[-1] == \">\":\n return default_def(filename, lineno)\n\n try:\n matches = file_to_tree(filename)[lineno]\n except (IOError, OSError, SyntaxError):\n return default_def(filename, lineno)\n if matches:\n return min(matches, key=lambda i: i.length()).data.name\n\n return default_def(filename, lineno)\n", 
"path": "ddtrace/profiling/_line2def.py"}]} | 1,441 | 176 |
gh_patches_debug_40179 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-3464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`reduction` for `ContrastiveLoss`
**Describe the bug**
the error messages and docstring should be consistent
https://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L89-L91
https://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L58
**Expected behavior**
implementing the option `reduction="none"`?
</issue>
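For context, the inconsistency is between a `forward` docstring that lists `"sum"` and `"none"` as accepted values and an implementation that only ever returns the summed loss. A minimal sketch of the failing call (illustrative only; the tensor shapes and argument values are assumptions, not taken from the repository):

```python
# Sketch, not repository code: calling ContrastiveLoss with reduction="none".
import torch
from monai.losses import ContrastiveLoss

loss_fn = ContrastiveLoss(temperature=0.5, batch_size=2, reduction="none")
pred = torch.randn(2, 8)
target = torch.randn(2, 8)
# The docstring implies "none" is supported, but forward() only handles "sum",
# so this call is expected to raise the "Unsupported reduction" ValueError
# instead of returning per-sample losses.
loss = loss_fn(pred, target)
```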
<code>
[start of monai/losses/contrastive.py]
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from typing import Union
13
14 import torch
15 from torch.nn import functional as F
16 from torch.nn.modules.loss import _Loss
17
18 from monai.utils import LossReduction
19
20
21 class ContrastiveLoss(_Loss):
22
23 """
24 Compute the Contrastive loss defined in:
25
26 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
27 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)
28
29 Adapted from:
30 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5
31
32 """
33
34 def __init__(
35 self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM
36 ) -> None:
37 """
38 Args:
39 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
40
41 Raises:
42 AssertionError: When an input of dimension length > 2 is passed
43 AssertionError: When input and target are of different shapes
44
45 """
46 super().__init__(reduction=LossReduction(reduction).value)
47
48 self.batch_size = batch_size
49 self.temperature = temperature
50
51 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
52 """
53 Args:
54 input: the shape should be B[F].
55 target: the shape should be B[F].
56
57 Raises:
58 ValueError: When ``self.reduction`` is not one of ["sum", "none"].
59 """
60 if len(target.shape) > 2 or len(input.shape) > 2:
61 raise AssertionError(
62 f"Either target or input has dimensions greater than 2 where target "
63 f"shape is ({target.shape}) and input shape is ({input.shape})"
64 )
65
66 if target.shape != input.shape:
67 raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
68
69 temperature_tensor = torch.tensor(self.temperature).to(input.device)
70
71 norm_i = F.normalize(input, dim=1)
72 norm_j = F.normalize(target, dim=1)
73
74 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
75 negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
76 negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
77
78 repr = torch.cat([norm_i, norm_j], dim=0)
79 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
80 sim_ij = torch.diag(sim_matrix, self.batch_size)
81 sim_ji = torch.diag(sim_matrix, -self.batch_size)
82
83 positives = torch.cat([sim_ij, sim_ji], dim=0)
84 nominator = torch.exp(positives / temperature_tensor)
85 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)
86
87 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
88
89 if self.reduction == LossReduction.SUM.value:
90 return torch.sum(loss_partial) / (2 * self.batch_size)
91 raise ValueError(f"Unsupported reduction: {self.reduction}, " f'available options are ["mean", "sum", "none"].')
92
[end of monai/losses/contrastive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -9,13 +9,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Union
-
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
-from monai.utils import LossReduction
+from monai.utils import deprecated_arg
class ContrastiveLoss(_Loss):
@@ -31,19 +29,23 @@
"""
- def __init__(
- self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM
- ) -> None:
+ @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
+ def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
"""
Args:
temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
+ batch_size: The number of samples.
Raises:
- AssertionError: When an input of dimension length > 2 is passed
- AssertionError: When input and target are of different shapes
+ ValueError: When an input of dimension length > 2 is passed
+ ValueError: When input and target are of different shapes
+
+ .. deprecated:: 0.8.0
+
+ `reduction` is no longer supported.
"""
- super().__init__(reduction=LossReduction(reduction).value)
+ super().__init__()
self.batch_size = batch_size
self.temperature = temperature
@@ -53,18 +55,15 @@
Args:
input: the shape should be B[F].
target: the shape should be B[F].
-
- Raises:
- ValueError: When ``self.reduction`` is not one of ["sum", "none"].
"""
if len(target.shape) > 2 or len(input.shape) > 2:
- raise AssertionError(
+ raise ValueError(
f"Either target or input has dimensions greater than 2 where target "
f"shape is ({target.shape}) and input shape is ({input.shape})"
)
if target.shape != input.shape:
- raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
+ raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
temperature_tensor = torch.tensor(self.temperature).to(input.device)
@@ -86,6 +85,4 @@
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
- if self.reduction == LossReduction.SUM.value:
- return torch.sum(loss_partial) / (2 * self.batch_size)
- raise ValueError(f"Unsupported reduction: {self.reduction}, " f'available options are ["mean", "sum", "none"].')
+ return torch.sum(loss_partial) / (2 * self.batch_size)
| {"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -9,13 +9,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import Union\n-\n import torch\n from torch.nn import functional as F\n from torch.nn.modules.loss import _Loss\n \n-from monai.utils import LossReduction\n+from monai.utils import deprecated_arg\n \n \n class ContrastiveLoss(_Loss):\n@@ -31,19 +29,23 @@\n \n \"\"\"\n \n- def __init__(\n- self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM\n- ) -> None:\n+ @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n+ def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n+ batch_size: The number of samples.\n \n Raises:\n- AssertionError: When an input of dimension length > 2 is passed\n- AssertionError: When input and target are of different shapes\n+ ValueError: When an input of dimension length > 2 is passed\n+ ValueError: When input and target are of different shapes\n+\n+ .. deprecated:: 0.8.0\n+\n+ `reduction` is no longer supported.\n \n \"\"\"\n- super().__init__(reduction=LossReduction(reduction).value)\n+ super().__init__()\n \n self.batch_size = batch_size\n self.temperature = temperature\n@@ -53,18 +55,15 @@\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n-\n- Raises:\n- ValueError: When ``self.reduction`` is not one of [\"sum\", \"none\"].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n- raise AssertionError(\n+ raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n \n if target.shape != input.shape:\n- raise AssertionError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n+ raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n \n@@ -86,6 +85,4 @@\n \n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n \n- if self.reduction == LossReduction.SUM.value:\n- return torch.sum(loss_partial) / (2 * self.batch_size)\n- raise ValueError(f\"Unsupported reduction: {self.reduction}, \" f'available options are [\"mean\", \"sum\", \"none\"].')\n+ return torch.sum(loss_partial) / (2 * self.batch_size)\n", "issue": "`reduction` for `ContrastiveLoss`\n**Describe the bug**\r\nthe error messages and docstring should be consistent\r\nhttps://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L89-L91\r\n\r\nhttps://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L58\r\n\r\n**Expected behavior**\r\nimplementing the option `reduction=\"none\"`?\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import LossReduction\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n def __init__(\n self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM\n ) -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n\n Raises:\n AssertionError: When an input of dimension length > 2 is passed\n AssertionError: When input and target are of different shapes\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"sum\", \"none\"].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise AssertionError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise AssertionError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n if self.reduction == LossReduction.SUM.value:\n return torch.sum(loss_partial) / (2 * self.batch_size)\n raise ValueError(f\"Unsupported reduction: {self.reduction}, \" f'available options are [\"mean\", \"sum\", \"none\"].')\n", "path": "monai/losses/contrastive.py"}]} | 1,788 | 739 |
gh_patches_debug_20192 | rasdani/github-patches | git_diff | certbot__certbot-7163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update SSL session cache size to match Mozilla recommendations
This is a followup from the research issue at #6903.
Ideally, https://github.com/mozilla/server-side-tls/issues/198 is resolved and Mozilla updates their recommendations. If not, I think we should update our value in https://github.com/certbot/certbot/blob/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf.
Exactly what these values should be is up for discussion; however, nginx's default timeout of 5 minutes seems like a reasonable place to start to me. I don't know off the top of my head how I think the cache should be configured.
</issue>
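Any change to the shipped `options-ssl-nginx.conf` also has to be registered in the `constants.py` file below, since the plugin tracks known versions of that file by SHA256 digest (see `ALL_SSL_OPTIONS_HASHES`). A small sketch of how such a digest would be computed (the helper name and the path are illustrative assumptions, not part of the codebase):

```python
# Illustrative helper: compute the digest that would be appended to
# ALL_SSL_OPTIONS_HASHES after editing ssl_session_cache / ssl_session_timeout.
import hashlib

def sha256_of_file(path: str) -> str:
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()

# Example call (path assumed):
# print(sha256_of_file("certbot-nginx/certbot_nginx/options-ssl-nginx.conf"))
```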
<code>
[start of certbot-nginx/certbot_nginx/constants.py]
1 """nginx plugin constants."""
2 import platform
3
4 FREEBSD_DARWIN_SERVER_ROOT = "/usr/local/etc/nginx"
5 LINUX_SERVER_ROOT = "/etc/nginx"
6
7 if platform.system() in ('FreeBSD', 'Darwin'):
8 server_root_tmp = FREEBSD_DARWIN_SERVER_ROOT
9 else:
10 server_root_tmp = LINUX_SERVER_ROOT
11
12 CLI_DEFAULTS = dict(
13 server_root=server_root_tmp,
14 ctl="nginx",
15 )
16 """CLI defaults."""
17
18
19 MOD_SSL_CONF_DEST = "options-ssl-nginx.conf"
20 """Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
21
22 UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-nginx-conf-digest.txt"
23 """Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`."""
24
25 SSL_OPTIONS_HASHES_NEW = [
26 '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',
27 ]
28 """SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9"""
29
30 ALL_SSL_OPTIONS_HASHES = [
31 '0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',
32 '9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',
33 'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',
34 '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',
35 '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',
36 '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',
37 ] + SSL_OPTIONS_HASHES_NEW
38 """SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC"""
39
40 def os_constant(key):
41 # XXX TODO: In the future, this could return different constants
42 # based on what OS we are running under. To see an
43 # approach to how to handle different OSes, see the
44 # apache version of this file. Currently, we do not
45 # actually have any OS-specific constants on Nginx.
46 """
47 Get a constant value for operating system
48
49 :param key: name of cli constant
50 :return: value of constant for active os
51 """
52 return CLI_DEFAULTS[key]
53
54 HSTS_ARGS = ['\"max-age=31536000\"', ' ', 'always']
55
56 HEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}
57
[end of certbot-nginx/certbot_nginx/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-nginx/certbot_nginx/constants.py b/certbot-nginx/certbot_nginx/constants.py
--- a/certbot-nginx/certbot_nginx/constants.py
+++ b/certbot-nginx/certbot_nginx/constants.py
@@ -24,6 +24,7 @@
SSL_OPTIONS_HASHES_NEW = [
'63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',
+ '2901debc7ecbc10917edd9084c05464c9c5930b463677571eaf8c94bffd11ae2',
]
"""SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9"""
@@ -34,6 +35,7 @@
'7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',
'394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',
'4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',
+ 'c052ffff0ad683f43bffe105f7c606b339536163490930e2632a335c8d191cc4',
] + SSL_OPTIONS_HASHES_NEW
"""SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC"""
| {"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/constants.py b/certbot-nginx/certbot_nginx/constants.py\n--- a/certbot-nginx/certbot_nginx/constants.py\n+++ b/certbot-nginx/certbot_nginx/constants.py\n@@ -24,6 +24,7 @@\n \n SSL_OPTIONS_HASHES_NEW = [\n '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',\n+ '2901debc7ecbc10917edd9084c05464c9c5930b463677571eaf8c94bffd11ae2',\n ]\n \"\"\"SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9\"\"\"\n \n@@ -34,6 +35,7 @@\n '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',\n '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',\n '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',\n+ 'c052ffff0ad683f43bffe105f7c606b339536163490930e2632a335c8d191cc4',\n ] + SSL_OPTIONS_HASHES_NEW\n \"\"\"SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC\"\"\"\n", "issue": "Update SSL session cache size to match Mozilla recommendations\nThis is a followup from the research issue at #6903.\r\n\r\nIdeally, https://github.com/mozilla/server-side-tls/issues/198 is resolved and Mozilla updates their recommendations. If not, I think we should update our value in https://github.com/certbot/certbot/blob/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf.\r\n\r\nExactly what these values should be is up for discussion, however, nginx's default timeout of 5 minutes seems like a reasonable place to start to me. I don't know of the top of my head how I think the cache should be configured.\n", "before_files": [{"content": "\"\"\"nginx plugin constants.\"\"\"\nimport platform\n\nFREEBSD_DARWIN_SERVER_ROOT = \"/usr/local/etc/nginx\"\nLINUX_SERVER_ROOT = \"/etc/nginx\"\n\nif platform.system() in ('FreeBSD', 'Darwin'):\n server_root_tmp = FREEBSD_DARWIN_SERVER_ROOT\nelse:\n server_root_tmp = LINUX_SERVER_ROOT\n\nCLI_DEFAULTS = dict(\n server_root=server_root_tmp,\n ctl=\"nginx\",\n)\n\"\"\"CLI defaults.\"\"\"\n\n\nMOD_SSL_CONF_DEST = \"options-ssl-nginx.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nUPDATED_MOD_SSL_CONF_DIGEST = \".updated-options-ssl-nginx-conf-digest.txt\"\n\"\"\"Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_OPTIONS_HASHES_NEW = [\n '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',\n]\n\"\"\"SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9\"\"\"\n\nALL_SSL_OPTIONS_HASHES = [\n '0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',\n '9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',\n 'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',\n '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',\n '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',\n '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',\n] + SSL_OPTIONS_HASHES_NEW\n\"\"\"SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC\"\"\"\n\ndef os_constant(key):\n # XXX TODO: In the future, this could return different constants\n # based on what OS we are running under. To see an\n # approach to how to handle different OSes, see the\n # apache version of this file. 
Currently, we do not\n # actually have any OS-specific constants on Nginx.\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n return CLI_DEFAULTS[key]\n\nHSTS_ARGS = ['\\\"max-age=31536000\\\"', ' ', 'always']\n\nHEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}\n", "path": "certbot-nginx/certbot_nginx/constants.py"}]} | 1,614 | 498 |
gh_patches_debug_10834 | rasdani/github-patches | git_diff | getredash__redash-6561 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The 'Create your first Dashboard' newbie link will not disappear even after I create dashboards
### Issue Summary
The 'Create your first Dashboard' newbie link will not disappear even after I create dashboards. The other newbie links work fine. I tried a completely new Redash instance and this issue still exists. I remember there is a recent commit related to the newbie links, but I cannot find which one. This issue does not exist in the previous Docker preview image, so I assume it is related to recent commits.
### Steps to Reproduce
1. Create new dashboards.
2. The link is still there.
<img width="280" alt="image" src="https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704">
### Technical details:
* Redash Version: 23.10.0-dev (dev)
* Browser/OS: Chrome 118
* How did you install Redash: Docker
</issue>
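The symptom traces back to the dashboard counter in `organization_status` below: `models.Dashboard.is_archived is False` is a Python identity check against a SQLAlchemy column object, so it evaluates to the plain boolean `False` before any SQL is built and the filter never matches, leaving the dashboard count at zero. A short sketch of the difference (made-up model, recent SQLAlchemy assumed; not Redash's real schema):

```python
# Illustrative only: the broken and the intended filter criteria side by side.
from sqlalchemy import Boolean, Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Dash(Base):
    __tablename__ = "dash"
    id = Column(Integer, primary_key=True)
    is_archived = Column(Boolean, default=False)

print(Dash.is_archived is False)    # False - a plain Python bool, no SQL at all
print(Dash.is_archived.is_(False))  # a real criterion, e.g. "dash.is_archived IS false"
```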
<code>
[start of redash/handlers/organization.py]
1 from flask_login import current_user, login_required
2
3 from redash import models
4 from redash.authentication import current_org
5 from redash.handlers import routes
6 from redash.handlers.base import json_response, org_scoped_rule
7
8
9 @routes.route(org_scoped_rule("/api/organization/status"), methods=["GET"])
10 @login_required
11 def organization_status(org_slug=None):
12 counters = {
13 "users": models.User.all(current_org).count(),
14 "alerts": models.Alert.all(group_ids=current_user.group_ids).count(),
15 "data_sources": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),
16 "queries": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),
17 "dashboards": models.Dashboard.query.filter(
18 models.Dashboard.org == current_org, models.Dashboard.is_archived is False
19 ).count(),
20 }
21
22 return json_response(dict(object_counters=counters))
23
[end of redash/handlers/organization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/organization.py b/redash/handlers/organization.py
--- a/redash/handlers/organization.py
+++ b/redash/handlers/organization.py
@@ -15,7 +15,7 @@
"data_sources": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),
"queries": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),
"dashboards": models.Dashboard.query.filter(
- models.Dashboard.org == current_org, models.Dashboard.is_archived is False
+ models.Dashboard.org == current_org, models.Dashboard.is_archived.is_(False)
).count(),
}
| {"golden_diff": "diff --git a/redash/handlers/organization.py b/redash/handlers/organization.py\n--- a/redash/handlers/organization.py\n+++ b/redash/handlers/organization.py\n@@ -15,7 +15,7 @@\n \"data_sources\": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),\n \"queries\": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),\n \"dashboards\": models.Dashboard.query.filter(\n- models.Dashboard.org == current_org, models.Dashboard.is_archived is False\n+ models.Dashboard.org == current_org, models.Dashboard.is_archived.is_(False)\n ).count(),\n }\n", "issue": "The 'Create your first Dashboard' newbie link will not dispear even I create dashboards\n### Issue Summary\r\n\r\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new dashboards.\r\n2. The link still there.\r\n\r\n<img width=\"280\" alt=\"image\" src=\"https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704\">\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 23.10.0-dev (dev)\r\n* Browser/OS: Chrome 118\r\n* How did you install Redash: Docker\r\n\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards\n### Issue Summary\r\n\r\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new dashboards.\r\n2. The link still there.\r\n\r\n<img width=\"280\" alt=\"image\" src=\"https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704\">\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 23.10.0-dev (dev)\r\n* Browser/OS: Chrome 118\r\n* How did you install Redash: Docker\r\n\n", "before_files": [{"content": "from flask_login import current_user, login_required\n\nfrom redash import models\nfrom redash.authentication import current_org\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\n\n\[email protected](org_scoped_rule(\"/api/organization/status\"), methods=[\"GET\"])\n@login_required\ndef organization_status(org_slug=None):\n counters = {\n \"users\": models.User.all(current_org).count(),\n \"alerts\": models.Alert.all(group_ids=current_user.group_ids).count(),\n \"data_sources\": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),\n \"queries\": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),\n \"dashboards\": models.Dashboard.query.filter(\n models.Dashboard.org == current_org, models.Dashboard.is_archived is False\n ).count(),\n }\n\n return json_response(dict(object_counters=counters))\n", "path": "redash/handlers/organization.py"}]} | 1,242 | 157 |
gh_patches_debug_10357 | rasdani/github-patches | git_diff | Parsl__parsl-2450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
walltime app function parameter modifies task_record func_name
**Describe the bug**
When the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to "wrapper" instead of to the function's name.
**To Reproduce**
Launch the code below using parsl version 1.2.0:
```
import parsl
print(parsl.__version__, flush = True)
from parsl.app.app import python_app, bash_app
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
@python_app
def test(stdout='std.out', stderr = 'std.err', walltime = 5):
from time import sleep
sleep(1)
Fail = 1/0
return 'Hello'
def retry_handler(exception, task_record):
print(task_record['func_name'], flush = True)
return 1
if __name__ == '__main__':
config = Config(
executors = [HighThroughputExecutor()],
retries = 2,
retry_handler = retry_handler
)
print('Loading Parsl Config', flush = True)
parsl.load(config)
fut = test()
print(fut.result())
```
It will print "wrapper" if the walltime parameter is present and "test" otherwise.
**Expected behavior**
The code should print the function's name (test).
**Environment**
- OS: Linux
- Python version: 3.10.4
- Parsl version: 1.2.0
**Distributed Environment**
- Where are you running the Parsl script from ? Local
- Where do you need the workers to run ? Local
</issue>
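The behaviour comes from the `timeout` wrapper in `parsl/app/python.py` below: the inner `wrapper` function is returned without copying the wrapped function's metadata, so anything that later reads `__name__` (such as the task record) sees "wrapper". A standalone illustration of the difference `functools.wraps` makes (plain Python, no Parsl required):

```python
import functools

def timeout_plain(f, seconds):
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

def timeout_wrapped(f, seconds):
    @functools.wraps(f)          # copies __name__, __doc__, etc. from f
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

def test():
    return "Hello"

print(timeout_plain(test, 5).__name__)    # -> "wrapper"
print(timeout_wrapped(test, 5).__name__)  # -> "test"
```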
<code>
[start of parsl/app/python.py]
1 import logging
2
3 import tblib.pickling_support
4 tblib.pickling_support.install()
5
6 from parsl.app.app import AppBase
7 from parsl.app.errors import wrap_error
8 from parsl.dataflow.dflow import DataFlowKernelLoader
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def timeout(f, seconds):
15 def wrapper(*args, **kwargs):
16 import threading
17 import ctypes
18 import parsl.app.errors
19
20 def inject_exception(thread):
21 ctypes.pythonapi.PyThreadState_SetAsyncExc(
22 ctypes.c_long(thread),
23 ctypes.py_object(parsl.app.errors.AppTimeout)
24 )
25
26 thread = threading.current_thread().ident
27 timer = threading.Timer(seconds, inject_exception, args=[thread])
28 timer.start()
29 result = f(*args, **kwargs)
30 timer.cancel()
31 return result
32 return wrapper
33
34
35 class PythonApp(AppBase):
36 """Extends AppBase to cover the Python App."""
37
38 def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):
39 super().__init__(
40 wrap_error(func),
41 data_flow_kernel=data_flow_kernel,
42 executors=executors,
43 cache=cache,
44 ignore_for_cache=ignore_for_cache
45 )
46 self.join = join
47
48 def __call__(self, *args, **kwargs):
49 """This is where the call to a python app is handled.
50
51 Args:
52 - Arbitrary
53 Kwargs:
54 - Arbitrary
55
56 Returns:
57 App_fut
58
59 """
60 invocation_kwargs = {}
61 invocation_kwargs.update(self.kwargs)
62 invocation_kwargs.update(kwargs)
63
64 if self.data_flow_kernel is None:
65 dfk = DataFlowKernelLoader.dfk()
66 else:
67 dfk = self.data_flow_kernel
68
69 walltime = invocation_kwargs.get('walltime')
70 if walltime is not None:
71 func = timeout(self.func, walltime)
72 else:
73 func = self.func
74
75 app_fut = dfk.submit(func, app_args=args,
76 executors=self.executors,
77 cache=self.cache,
78 ignore_for_cache=self.ignore_for_cache,
79 app_kwargs=invocation_kwargs,
80 join=self.join)
81
82 return app_fut
83
[end of parsl/app/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/app/python.py b/parsl/app/python.py
--- a/parsl/app/python.py
+++ b/parsl/app/python.py
@@ -3,6 +3,8 @@
import tblib.pickling_support
tblib.pickling_support.install()
+from functools import wraps
+
from parsl.app.app import AppBase
from parsl.app.errors import wrap_error
from parsl.dataflow.dflow import DataFlowKernelLoader
@@ -12,6 +14,7 @@
def timeout(f, seconds):
+ @wraps(f)
def wrapper(*args, **kwargs):
import threading
import ctypes
| {"golden_diff": "diff --git a/parsl/app/python.py b/parsl/app/python.py\n--- a/parsl/app/python.py\n+++ b/parsl/app/python.py\n@@ -3,6 +3,8 @@\n import tblib.pickling_support\n tblib.pickling_support.install()\n \n+from functools import wraps\n+\n from parsl.app.app import AppBase\n from parsl.app.errors import wrap_error\n from parsl.dataflow.dflow import DataFlowKernelLoader\n@@ -12,6 +14,7 @@\n \n \n def timeout(f, seconds):\n+ @wraps(f)\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n", "issue": "walltime app function parameter modifies task_record func_name\n**Describe the bug**\r\nWhen the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to \"wrapper\" instead of to the function's name. \r\n\r\n**To Reproduce**\r\nLaunch the code below using parsl version 1.2.0:\r\n```\r\nimport parsl\r\nprint(parsl.__version__, flush = True)\r\nfrom parsl.app.app import python_app, bash_app\r\nfrom parsl.config import Config\r\nfrom parsl.executors import HighThroughputExecutor\r\n\r\n\r\n@python_app\r\ndef test(stdout='std.out', stderr = 'std.err', walltime = 5):\r\n from time import sleep\r\n sleep(1)\r\n Fail = 1/0\r\n return 'Hello'\r\n\r\ndef retry_handler(exception, task_record):\r\n print(task_record['func_name'], flush = True)\r\n return 1\r\n\r\nif __name__ == '__main__':\r\n\r\n config = Config(\r\n executors = [HighThroughputExecutor()],\r\n retries = 2,\r\n retry_handler = retry_handler\r\n )\r\n print('Loading Parsl Config', flush = True)\r\n parsl.load(config)\r\n\r\n fut = test()\r\n\r\n print(fut.result())\r\n\r\n```\r\n\r\nIt will print \"wrapper\" if the walltime parameter is present and test otherwise. \r\n\r\n**Expected behavior**\r\nThe code should print the function's name (test).\r\n\r\n**Environment**\r\n- OS: Linux\r\n- Python version: 3.10.4\r\n- Parsl version: 1.2.0\r\n\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? Local\r\n- Where do you need the workers to run ? 
Local\r\n\n", "before_files": [{"content": "import logging\n\nimport tblib.pickling_support\ntblib.pickling_support.install()\n\nfrom parsl.app.app import AppBase\nfrom parsl.app.errors import wrap_error\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef timeout(f, seconds):\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n import parsl.app.errors\n\n def inject_exception(thread):\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread),\n ctypes.py_object(parsl.app.errors.AppTimeout)\n )\n\n thread = threading.current_thread().ident\n timer = threading.Timer(seconds, inject_exception, args=[thread])\n timer.start()\n result = f(*args, **kwargs)\n timer.cancel()\n return result\n return wrapper\n\n\nclass PythonApp(AppBase):\n \"\"\"Extends AppBase to cover the Python App.\"\"\"\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):\n super().__init__(\n wrap_error(func),\n data_flow_kernel=data_flow_kernel,\n executors=executors,\n cache=cache,\n ignore_for_cache=ignore_for_cache\n )\n self.join = join\n\n def __call__(self, *args, **kwargs):\n \"\"\"This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n walltime = invocation_kwargs.get('walltime')\n if walltime is not None:\n func = timeout(self.func, walltime)\n else:\n func = self.func\n\n app_fut = dfk.submit(func, app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs,\n join=self.join)\n\n return app_fut\n", "path": "parsl/app/python.py"}]} | 1,534 | 143 |
gh_patches_debug_24727 | rasdani/github-patches | git_diff | sublimelsp__LSP-1852 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to save modified file when dragged to a new window
Description
While editing a TypeScript project, I dragged a tab out to a separate window, then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project (see the upstream Sublime Text issue: https://github.com/sublimehq/sublime_text/issues/4623).
Steps to reproduce
Start Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed)
open multiple files in tabs
drag one of the tabs out to a separate window
modify the tab that's been dragged out, then try to save
Expected behavior
Expect the separate window/modified file to save.
Actual behavior

The separate window doesn't save with cmd+s; the modified indicator (the circle icon/symbol in the tab) is still visible.
Environment
Sublime Build: 4112
Operating system and version: macOS 11.4,
</issue>
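The report boils down to the `lsp_save` command never running for a view that has been dragged into its own window, because the command is reported as disabled there. A minimal sketch of the workaround direction (an illustrative Sublime Text plugin command, not the plugin's actual code):

```python
# Sketch only: a TextCommand that stays enabled even for detached views and
# falls through to the native save.
import sublime_plugin

class ExampleSaveCommand(sublime_plugin.TextCommand):
    def is_enabled(self, event=None, point=None):
        # Returning True unconditionally keeps the cmd+s keybinding usable for
        # views that were dragged out of the original window.
        return True

    def run(self, edit):
        self.view.run_command("save", {"async": True})
```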
<code>
[start of plugin/save_command.py]
1 from .core.registry import LspTextCommand
2 from .core.settings import userprefs
3 from .core.typing import Callable, List, Type
4 from abc import ABCMeta, abstractmethod
5 import sublime
6 import sublime_plugin
7
8
9 class SaveTask(metaclass=ABCMeta):
10 """
11 Base class for tasks that run on save.
12
13 Note: The whole task runs on the async thread.
14 """
15
16 @classmethod
17 @abstractmethod
18 def is_applicable(cls, view: sublime.View) -> bool:
19 pass
20
21 def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):
22 self._task_runner = task_runner
23 self._on_done = on_done
24 self._completed = False
25 self._cancelled = False
26 self._status_key = 'lsp_save_task_timeout'
27
28 def run_async(self) -> None:
29 self._erase_view_status()
30 sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)
31
32 def _on_timeout(self) -> None:
33 if not self._completed and not self._cancelled:
34 self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))
35 self._cancelled = True
36 self._on_done()
37
38 def cancel(self) -> None:
39 self._cancelled = True
40
41 def _set_view_status(self, text: str) -> None:
42 self._task_runner.view.set_status(self._status_key, text)
43 sublime.set_timeout_async(self._erase_view_status, 5000)
44
45 def _erase_view_status(self) -> None:
46 self._task_runner.view.erase_status(self._status_key)
47
48 def _on_complete(self) -> None:
49 assert not self._completed
50 self._completed = True
51 if not self._cancelled:
52 self._on_done()
53
54 def _purge_changes_async(self) -> None:
55 # Supermassive hack that will go away later.
56 listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])
57 for listener in listeners:
58 if listener.__class__.__name__ == 'DocumentSyncListener':
59 listener.purge_changes_async() # type: ignore
60 break
61
62
63 class LspSaveCommand(LspTextCommand):
64 """
65 A command used as a substitute for native save command. Runs code actions and document
66 formatting before triggering the native save command.
67 """
68 _tasks = [] # type: List[Type[SaveTask]]
69
70 @classmethod
71 def register_task(cls, task: Type[SaveTask]) -> None:
72 assert task not in cls._tasks
73 cls._tasks.append(task)
74
75 def __init__(self, view: sublime.View) -> None:
76 super().__init__(view)
77 self._pending_tasks = [] # type: List[SaveTask]
78
79 def run(self, edit: sublime.Edit) -> None:
80 if self._pending_tasks:
81 for task in self._pending_tasks:
82 task.cancel()
83 self._pending_tasks = []
84 sublime.set_timeout_async(self._trigger_on_pre_save_async)
85 for Task in self._tasks:
86 if Task.is_applicable(self.view):
87 self._pending_tasks.append(Task(self, self._on_task_completed_async))
88 if self._pending_tasks:
89 sublime.set_timeout_async(self._run_next_task_async)
90 else:
91 self._trigger_native_save()
92
93 def _trigger_on_pre_save_async(self) -> None:
94 # Supermassive hack that will go away later.
95 listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
96 for listener in listeners:
97 if listener.__class__.__name__ == 'DocumentSyncListener':
98 listener.trigger_on_pre_save_async() # type: ignore
99 break
100
101 def _run_next_task_async(self) -> None:
102 current_task = self._pending_tasks[0]
103 current_task.run_async()
104
105 def _on_task_completed_async(self) -> None:
106 self._pending_tasks.pop(0)
107 if self._pending_tasks:
108 self._run_next_task_async()
109 else:
110 self._trigger_native_save()
111
112 def _trigger_native_save(self) -> None:
113 # Triggered from set_timeout to preserve original semantics of on_pre_save handling
114 sublime.set_timeout(lambda: self.view.run_command('save', {"async": True}))
115
[end of plugin/save_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/save_command.py b/plugin/save_command.py
--- a/plugin/save_command.py
+++ b/plugin/save_command.py
@@ -1,6 +1,6 @@
from .core.registry import LspTextCommand
from .core.settings import userprefs
-from .core.typing import Callable, List, Type
+from .core.typing import Callable, List, Optional, Type
from abc import ABCMeta, abstractmethod
import sublime
import sublime_plugin
@@ -90,6 +90,15 @@
else:
self._trigger_native_save()
+ def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
+ # Workaround to ensure that the command will run, even if a view was dragged out to a new window,
+ # see https://github.com/sublimelsp/LSP/issues/1791.
+ # The check to determine whether the keybinding for lsp_save is applicable already happens in
+ # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the
+ # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for
+ # dragged out views and that LSP keeps working as expected.
+ return True
+
def _trigger_on_pre_save_async(self) -> None:
# Supermassive hack that will go away later.
listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
| {"golden_diff": "diff --git a/plugin/save_command.py b/plugin/save_command.py\n--- a/plugin/save_command.py\n+++ b/plugin/save_command.py\n@@ -1,6 +1,6 @@\n from .core.registry import LspTextCommand\n from .core.settings import userprefs\n-from .core.typing import Callable, List, Type\n+from .core.typing import Callable, List, Optional, Type\n from abc import ABCMeta, abstractmethod\n import sublime\n import sublime_plugin\n@@ -90,6 +90,15 @@\n else:\n self._trigger_native_save()\n \n+ def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n+ # Workaround to ensure that the command will run, even if a view was dragged out to a new window,\n+ # see https://github.com/sublimelsp/LSP/issues/1791.\n+ # The check to determine whether the keybinding for lsp_save is applicable already happens in\n+ # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the\n+ # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for\n+ # dragged out views and that LSP keeps working as expected.\n+ return True\n+\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n", "issue": "Unable to save modified file when dragged to a new window\nDescription\r\nEditing typescript project, dragged a tab out to a separate window then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project. (see sublime issue - https://github.com/sublimehq/sublime_text/issues/4623)\r\n\r\nSteps to reproduce\r\nStart Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed)\r\nopen multiple files in tabs\r\ndrag one of the tabs out to a separate window\r\nmodify the tab that's been dragged out, then try to save\r\nExpected behavior\r\nExpect the separate window/modified file to save.\r\n\r\nActual behavior\r\n\r\n\r\n\r\nThe separate window doesn't save with cmd+s (can still see the modified indication on top (circle icon/symbol)\r\n\r\nEnvironment\r\nSublime Build: 4112\r\nOperating system and version: macOS 11.4,\n", "before_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = 'lsp_save_task_timeout'\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n 
def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n self._run_next_task_async()\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n", "path": "plugin/save_command.py"}]} | 2,005 | 328 |
gh_patches_debug_10398 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/pipeline/schedule/_utils.py]
1 from typing import Any, List, Optional
2
3 import torch
4 import torch.cuda
5 from torch.nn import Module
6 from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
7
8
9 def to_device(x: Any, device: Optional[torch.device] = None) -> Any:
10 """Move object to device if it is a tensor.
11
12 Args:
13 x (Any): Object to be moved.
14 device (Optional[torch.device], optional): Target device. Defaults to None.
15
16 Returns:
17 Any: Moved object.
18 """
19 if isinstance(x, torch.Tensor):
20 return x.to(device)
21 return x
22
23
24 def get_batch_size(batch: Any) -> int:
25 """Get the batch size (size of dimension-0) of the first tensor in the batch.
26
27 Args:
28 batch (Any): Batch to be inspected.
29
30 Raises:
31 RuntimeError: If no tensor is found in the batch.
32
33 Returns:
34 int: Batch size.
35 """
36 data_list, _ = tree_flatten(batch)
37 for data in data_list:
38 if isinstance(data, torch.Tensor):
39 return data.size(0)
40 raise RuntimeError('No tensor found in the batch')
41
42
43 def get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:
44 """Get a micro batch of the original batch.
45
46 Args:
47 batch (Any): Batch to be sliced.
48 start (int): Start index of the micro batch.
49 micro_batch_size (int): Size of the micro batch.
50
51 Returns:
52 Any: Target micro batch.
53 """
54
55 def _get_tensor_slice(x: Any):
56 if isinstance(x, torch.Tensor):
57 return x[start:start + micro_batch_size]
58 return x
59
60 return tree_map(_get_tensor_slice, batch)
61
62
63 def model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:
64 """Call model forward function with data and internal inputs.
65
66 Args:
67 model (Module): Model to be called.
68 data (Any): Data loaded from data iterator.
69 internal_inputs (Optional[dict]): Data from previous stage. It must be a dict or None if it's the first stage.
70
71 Returns:
72 Any: Outputs of the model.
73 """
74 if internal_inputs is None:
75 internal_inputs = {}
76 if isinstance(data, (list, tuple)):
77 return model(*data, **internal_inputs)
78 elif isinstance(data, dict):
79 return model(**data, **internal_inputs)
80 return model(data, **internal_inputs)
81
82
83 def retain_grad(x: Any) -> None:
84 """Call retain_grad() on a tensor.
85
86 Args:
87 x (Any): Object to be called.
88 """
89 if isinstance(x, torch.Tensor) and x.requires_grad:
90 x.retain_grad()
91
92
93 def detach(x: Any) -> Any:
94 """Call detach() on a tensor.
95
96 Args:
97 x (Any): Object to be called.
98
99 Returns:
100 Any: The detached object.
101 """
102 if isinstance(x, torch.Tensor):
103 return x.detach()
104 return x
105
106
107 def merge_batch(data: List[Any]) -> Any:
108 """Merge micro batches into a batch.
109
110 Args:
111 data (List[Any]): A list of micro batches.
112
113 Returns:
114 Any: Merge batch.
115 """
116 if len(data) == 0:
117 return
118 flattened_data = []
119 tree_spec = None
120 for d in data:
121 elems, tree_spec = tree_flatten(d)
122 flattened_data.append(elems)
123 merged_data = []
124 for elem_batch in zip(*flattened_data):
125 if isinstance(elem_batch[0], torch.Tensor):
126 merged_data.append(torch.cat(elem_batch, dim=0))
127 else:
128 merged_data.append(list(elem_batch))
129 return tree_unflatten(merged_data, tree_spec)
130
[end of colossalai/pipeline/schedule/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py
--- a/colossalai/pipeline/schedule/_utils.py
+++ b/colossalai/pipeline/schedule/_utils.py
@@ -123,7 +123,10 @@
merged_data = []
for elem_batch in zip(*flattened_data):
if isinstance(elem_batch[0], torch.Tensor):
- merged_data.append(torch.cat(elem_batch, dim=0))
+ if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs
+ merged_data.append(None)
+ else:
+ merged_data.append(torch.cat(elem_batch, dim=0))
else:
merged_data.append(list(elem_batch))
return tree_unflatten(merged_data, tree_spec)
| {"golden_diff": "diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py\n--- a/colossalai/pipeline/schedule/_utils.py\n+++ b/colossalai/pipeline/schedule/_utils.py\n@@ -123,7 +123,10 @@\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n- merged_data.append(torch.cat(elem_batch, dim=0))\n+ if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs\n+ merged_data.append(None)\n+ else:\n+ merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nimport torch.cuda\nfrom torch.nn import Module\nfrom torch.utils._pytree import tree_flatten, tree_map, tree_unflatten\n\n\ndef to_device(x: Any, device: Optional[torch.device] = None) -> Any:\n \"\"\"Move object to device if it is a tensor.\n\n Args:\n x (Any): Object to be moved.\n device (Optional[torch.device], optional): Target device. Defaults to None.\n\n Returns:\n Any: Moved object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.to(device)\n return x\n\n\ndef get_batch_size(batch: Any) -> int:\n \"\"\"Get the batch size (size of dimension-0) of the first tensor in the batch.\n\n Args:\n batch (Any): Batch to be inspected.\n\n Raises:\n RuntimeError: If no tensor is found in the batch.\n\n Returns:\n int: Batch size.\n \"\"\"\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')\n\n\ndef get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:\n \"\"\"Get a micro batch of the original batch.\n\n Args:\n batch (Any): Batch to be sliced.\n start (int): Start index of the micro batch.\n micro_batch_size (int): Size of the micro batch.\n\n Returns:\n Any: Target micro batch.\n \"\"\"\n\n def _get_tensor_slice(x: Any):\n if isinstance(x, torch.Tensor):\n return x[start:start + micro_batch_size]\n return x\n\n return tree_map(_get_tensor_slice, batch)\n\n\ndef model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:\n \"\"\"Call model forward function with data and internal inputs.\n\n Args:\n model (Module): Model to be called.\n data (Any): Data loaded from data iterator.\n internal_inputs (Optional[dict]): Data from previous stage. 
It must be a dict or None if it's the first stage.\n\n Returns:\n Any: Outputs of the model.\n \"\"\"\n if internal_inputs is None:\n internal_inputs = {}\n if isinstance(data, (list, tuple)):\n return model(*data, **internal_inputs)\n elif isinstance(data, dict):\n return model(**data, **internal_inputs)\n return model(data, **internal_inputs)\n\n\ndef retain_grad(x: Any) -> None:\n \"\"\"Call retain_grad() on a tensor.\n\n Args:\n x (Any): Object to be called.\n \"\"\"\n if isinstance(x, torch.Tensor) and x.requires_grad:\n x.retain_grad()\n\n\ndef detach(x: Any) -> Any:\n \"\"\"Call detach() on a tensor.\n\n Args:\n x (Any): Object to be called.\n\n Returns:\n Any: The detached object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.detach()\n return x\n\n\ndef merge_batch(data: List[Any]) -> Any:\n \"\"\"Merge micro batches into a batch.\n\n Args:\n data (List[Any]): A list of micro batches.\n\n Returns:\n Any: Merge batch.\n \"\"\"\n if len(data) == 0:\n return\n flattened_data = []\n tree_spec = None\n for d in data:\n elems, tree_spec = tree_flatten(d)\n flattened_data.append(elems)\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "path": "colossalai/pipeline/schedule/_utils.py"}]} | 1,678 | 187 |
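Editor's note (not part of the dataset record above): the ColossalAI patch special-cases zero-dimensional tensors in `merge_batch` because a pipeline's loss is a scalar, and `torch.cat` refuses to concatenate 0-dim tensors. A minimal stand-alone sketch of that failure mode, alongside the ordinary per-sample case that still merges as before:

```python
import torch

# A scalar loss (0-dim tensor) cannot be concatenated along dim=0 --
# exactly the case the patch maps to None in the pipeline outputs.
losses = [torch.tensor(0.7), torch.tensor(0.4)]
try:
    torch.cat(losses, dim=0)
except RuntimeError as err:
    print(err)  # zero-dimensional tensor ... cannot be concatenated

# Ordinary per-sample outputs still merge along the batch dimension.
logits = [torch.randn(2, 10), torch.randn(2, 10)]
print(torch.cat(logits, dim=0).shape)  # torch.Size([4, 10])
```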
gh_patches_debug_27404 | rasdani/github-patches | git_diff | onnx__onnx-tensorflow-762 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'
Hi,
I have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? Thx
>
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 56, in prepare
> return cls.onnx_model_to_tensorflow_rep(model, strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 76, in onnx_model_to_tensorflow_rep
> return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 133, in _onnx_graph_to_tensorflow_rep
> onnx_node, tensor_dict, handlers, opset=opset, strict=strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 228, in _onnx_node_to_tensorflow_op
> return handler.handle(node, tensor_dict=tensor_dict, strict=strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\handler.py", line 59, in handle
> return ver_handle(node, **kwargs)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\backend\upsample.py", line 33, in version_7
> new_height = np.floor(x_shape[2] * scales[2])
> TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'
</issue>
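Editor's note (illustration only, not onnx-tensorflow code): the traceback arises because `x.get_shape().as_list()` reports dimensions that are unknown at graph-construction time as Python `None`, and `None * scale` is the reported `TypeError`. Reading the shape dynamically with `tf.shape` — similar in spirit to the `tf_shape` helper already used elsewhere in the file shown below — sidesteps the problem. A minimal TF 2.x sketch:

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 3, None, None], tf.float32)])
def upscale_hw(x):
    # Static shape: unknown height/width come back as None, so Python arithmetic breaks.
    static_hw = x.get_shape().as_list()[2:]   # [None, None]
    # new_h = static_hw[0] * 2.0              # TypeError: NoneType * float (the crash above)

    # Dynamic shape: an int32 tensor resolved at run time works for any input size.
    new_hw = tf.cast(tf.cast(tf.shape(x)[2:], tf.float32) * 2.0, tf.int32)
    return tf.image.resize(tf.transpose(x, [0, 2, 3, 1]), new_hw)  # NCHW -> NHWC

print(upscale_hw(tf.zeros([1, 3, 5, 7])).shape)  # (1, 10, 14, 3)
```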
<code>
[start of onnx_tf/handlers/backend/upsample.py]
1 import copy
2
3 import numpy as np
4 import tensorflow as tf
5
6 from onnx_tf.common import exception
7 from onnx_tf.handlers.backend_handler import BackendHandler
8 from onnx_tf.handlers.handler import onnx_op
9 from onnx_tf.handlers.handler import partial_support
10 from onnx_tf.handlers.handler import ps_description
11 from onnx_tf.handlers.handler import tf_func
12 from onnx_tf.common.tf_helper import tf_shape
13
14
15 @onnx_op("Upsample")
16 @tf_func(tf.image.resize)
17 @partial_support(True)
18 @ps_description("Upsample required 4D input in Tensorflow.")
19 class Upsample(BackendHandler):
20
21 @classmethod
22 def args_check(cls, node, **kwargs):
23 x = kwargs["tensor_dict"][node.inputs[0]]
24 x_shape = x.get_shape().as_list()
25 if len(x_shape) != 4:
26 exception.OP_UNSUPPORTED_EXCEPT("Upsample without 4D input", "Tensorflow")
27
28 if node.attrs.get(
29 "mode", "nearest").lower() not in ["nearest", "bilinear", "linear"]:
30 exception.OP_UNSUPPORTED_EXCEPT("Upsample without nearest or bilinear",
31 "Tensorflow")
32
33 @classmethod
34 def version_7(cls, node, **kwargs):
35 x = kwargs["tensor_dict"][node.inputs[0]]
36 x_shape = x.get_shape().as_list()
37 attrs = copy.deepcopy(node.attrs)
38 scales = attrs["scales"]
39 new_height = np.floor(x_shape[2] * scales[2])
40 new_weight = np.floor(x_shape[3] * scales[3])
41
42 mode = attrs.get("mode", "nearest")
43 if mode.lower() == "bilinear" or mode.lower() == "linear":
44 mode = tf.image.ResizeMethod.BILINEAR
45 else:
46 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
47
48 attrs["size"] = np.array((new_height, new_weight), dtype=np.int32)
49 attrs["method"] = mode
50
51 return [
52 cls.make_tensor_from_onnx_node(
53 node, attrs=attrs, c_last_only=True, **kwargs)
54 ]
55
56 @classmethod
57 def version_9(cls, node, **kwargs):
58 x = kwargs["tensor_dict"][node.inputs[0]]
59 x_shape = tf_shape(x)
60 attrs = copy.deepcopy(node.attrs)
61 scales = kwargs["tensor_dict"][node.inputs[1]]
62
63 assert_n_c_scale_is_one = tf.Assert(
64 tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),
65 [scales])
66
67 with tf.control_dependencies([assert_n_c_scale_is_one]):
68 h_w_scale = scales[2:]
69 h_w_shape = x_shape[2:]
70 new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),
71 tf.int32)
72
73 mode = attrs.get("mode", "nearest")
74 if mode.lower() == "bilinear" or mode.lower() == "linear":
75 mode = tf.image.ResizeMethod.BILINEAR
76 else:
77 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
78
79 attrs["size"] = new_h_w_shape
80 attrs["method"] = mode
81
82 # Remove scale.
83 upsample_node = copy.deepcopy(node)
84 del upsample_node.inputs[1]
85 return [
86 cls.make_tensor_from_onnx_node(
87 upsample_node, attrs=attrs, c_last_only=True, **kwargs)
88 ]
89
[end of onnx_tf/handlers/backend/upsample.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py
--- a/onnx_tf/handlers/backend/upsample.py
+++ b/onnx_tf/handlers/backend/upsample.py
@@ -1,6 +1,5 @@
import copy
-import numpy as np
import tensorflow as tf
from onnx_tf.common import exception
@@ -33,20 +32,28 @@
@classmethod
def version_7(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
- x_shape = x.get_shape().as_list()
+ x_shape = tf_shape(x)
attrs = copy.deepcopy(node.attrs)
scales = attrs["scales"]
- new_height = np.floor(x_shape[2] * scales[2])
- new_weight = np.floor(x_shape[3] * scales[3])
- mode = attrs.get("mode", "nearest")
- if mode.lower() == "bilinear" or mode.lower() == "linear":
- mode = tf.image.ResizeMethod.BILINEAR
- else:
- mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
+ assert_n_c_scale_is_one = tf.Assert(
+ tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),
+ [scales])
+
+ with tf.control_dependencies([assert_n_c_scale_is_one]):
+ h_w_scale = scales[2:]
+ h_w_shape = x_shape[2:]
+ new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),
+ tf.int32)
+
+ mode = attrs.get("mode", "nearest")
+ if mode.lower() == "bilinear" or mode.lower() == "linear":
+ mode = tf.image.ResizeMethod.BILINEAR
+ else:
+ mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
- attrs["size"] = np.array((new_height, new_weight), dtype=np.int32)
- attrs["method"] = mode
+ attrs["size"] = new_h_w_shape
+ attrs["method"] = mode
return [
cls.make_tensor_from_onnx_node(
| {"golden_diff": "diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py\n--- a/onnx_tf/handlers/backend/upsample.py\n+++ b/onnx_tf/handlers/backend/upsample.py\n@@ -1,6 +1,5 @@\n import copy\n \n-import numpy as np\n import tensorflow as tf\n \n from onnx_tf.common import exception\n@@ -33,20 +32,28 @@\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n- x_shape = x.get_shape().as_list()\n+ x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n- new_height = np.floor(x_shape[2] * scales[2])\n- new_weight = np.floor(x_shape[3] * scales[3])\n \n- mode = attrs.get(\"mode\", \"nearest\")\n- if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n- mode = tf.image.ResizeMethod.BILINEAR\n- else:\n- mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n+ assert_n_c_scale_is_one = tf.Assert(\n+ tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n+ [scales])\n+\n+ with tf.control_dependencies([assert_n_c_scale_is_one]):\n+ h_w_scale = scales[2:]\n+ h_w_shape = x_shape[2:]\n+ new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),\n+ tf.int32)\n+\n+ mode = attrs.get(\"mode\", \"nearest\")\n+ if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n+ mode = tf.image.ResizeMethod.BILINEAR\n+ else:\n+ mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n \n- attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n- attrs[\"method\"] = mode\n+ attrs[\"size\"] = new_h_w_shape\n+ attrs[\"method\"] = mode\n \n return [\n cls.make_tensor_from_onnx_node(\n", "issue": "Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\nHi, \r\nI have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? 
Thx\r\n\r\n> \r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 56, in prepare\r\n> return cls.onnx_model_to_tensorflow_rep(model, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 76, in onnx_model_to_tensorflow_rep\r\n> return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 133, in _onnx_graph_to_tensorflow_rep\r\n> onnx_node, tensor_dict, handlers, opset=opset, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 228, in _onnx_node_to_tensorflow_op\r\n> return handler.handle(node, tensor_dict=tensor_dict, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\handler.py\", line 59, in handle\r\n> return ver_handle(node, **kwargs)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\backend\\upsample.py\", line 33, in version_7\r\n> new_height = np.floor(x_shape[2] * scales[2])\r\n> TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\n", "before_files": [{"content": "import copy\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom onnx_tf.handlers.handler import tf_func\nfrom onnx_tf.common.tf_helper import tf_shape\n\n\n@onnx_op(\"Upsample\")\n@tf_func(tf.image.resize)\n@partial_support(True)\n@ps_description(\"Upsample required 4D input in Tensorflow.\")\nclass Upsample(BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n if len(x_shape) != 4:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without 4D input\", \"Tensorflow\")\n\n if node.attrs.get(\n \"mode\", \"nearest\").lower() not in [\"nearest\", \"bilinear\", \"linear\"]:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without nearest or bilinear\",\n \"Tensorflow\")\n\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n new_height = np.floor(x_shape[2] * scales[2])\n new_weight = np.floor(x_shape[3] * scales[3])\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n attrs[\"method\"] = mode\n\n return [\n cls.make_tensor_from_onnx_node(\n node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n\n @classmethod\n def version_9(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = kwargs[\"tensor_dict\"][node.inputs[1]]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * 
tf.cast(h_w_shape, scales.dtype),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n # Remove scale.\n upsample_node = copy.deepcopy(node)\n del upsample_node.inputs[1]\n return [\n cls.make_tensor_from_onnx_node(\n upsample_node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n", "path": "onnx_tf/handlers/backend/upsample.py"}]} | 1,922 | 504 |
gh_patches_debug_17503 | rasdani/github-patches | git_diff | voxel51__fiftyone-563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] numpy.array sample fields trigger server error when serialized
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04
- **FiftyOne installed from (pip or source)**: source
- **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5
- **Python version**: 3.6
### Commands to reproduce
1. Start server with `python fiftyone/server/main.py`
2. Start app with `yarn dev`
3. Run the code below
### Describe the problem
The server fails to serialize the sample (see traceback) and the sample does not display in the app.
### Code to reproduce issue
```python
import fiftyone as fo
import numpy as np
dataset = fo.Dataset()
dataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3])))
session = fo.launch_app(remote=True, dataset=dataset)
```
### Other info / logs
Probably introduced in #543, since that changed JSON encoding. Previously, this field was serialized as:
```
"field": {
"$binary": "eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=",
"$type": "00"
}
```
Server traceback:
```
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File ".../lib/python3.6/site-packages/socketio/server.py", line 696, in _handle_event_internal
binary=binary))
File ".../lib/python3.6/site-packages/socketio/server.py", line 607, in _send_packet
encoded_packet = pkt.encode()
File ".../lib/python3.6/site-packages/socketio/packet.py", line 71, in encode
encoded_packet += self.json.dumps(data, separators=(',', ':'))
File "/home/alan/code/fiftyone/fiftyone/server/json_util.py", line 47, in dumps
json_util.dumps(*args, **kwargs), parse_constant=lambda c: c
File ".../lib/python3.6/site-packages/bson/json_util.py", line 383, in dumps
return json.dumps(_json_convert(obj, json_options), *args, **kwargs)
File "/usr/lib/python3.6/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 275, in encode
chunks = self.iterencode(o, _one_shot=True)
File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 357, in iterencode
return _iterencode(o, 0)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte
```
### What areas of FiftyOne does this bug affect?
- [ ] `App`: FiftyOne application issue
- [ ] `Core`: Core `fiftyone` Python library issue
- [x] `Server`: Fiftyone server issue
</issue>
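Editor's note (stand-alone illustration, not FiftyOne internals): the sample's ndarray field reaches the encoder as raw compressed bytes, which plain JSON encoders cannot represent — the standard library rejects them outright, while `simplejson` (used underneath `bson.json_util` in the traceback) tries to decode them as UTF-8 and raises the `UnicodeDecodeError` shown above. The field name and compression below are illustrative only:

```python
import json
import zlib
import numpy as np

# Roughly what ends up in the sample's Mongo dict: an ndarray serialized to bytes.
array_bytes = zlib.compress(np.array([1, 2, 3]).tobytes())
sample_dict = {"filepath": "/path/to/image", "field": array_bytes}

try:
    json.dumps(sample_dict)
except TypeError as err:
    print(err)  # Object of type bytes is not JSON serializable
```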
<code>
[start of fiftyone/server/json_util.py]
1 """
2 FiftyOne server json utilies.
3
4 | Copyright 2017-2020, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 from bson import ObjectId, json_util
9 from flask.json import JSONEncoder
10
11 from fiftyone.core.sample import Sample, SampleView
12 from fiftyone.core.stages import ViewStage
13
14
15 class FiftyOneJSONEncoder(JSONEncoder):
16 """JSON encoder for the FiftyOne server.
17
18 Any classes with non-standard serialization methods should
19 be accounted for in the `default()` method.
20 """
21
22 def default(self, o): # pylint: disable=E0202
23 """Returns the serialized representation of the objects
24
25 Args:
26 o: the object
27
28 Returns:
29 str
30 """
31 if isinstance(o, (Sample, SampleView)):
32 return o.to_mongo_dict()
33 if issubclass(type(o), ViewStage):
34 return o._serialize()
35 if isinstance(o, ObjectId):
36 return str(o)
37 if isinstance(o, float):
38 return json_util.dumps(o)
39 return super().default(o)
40
41 @staticmethod
42 def dumps(*args, **kwargs):
43 """Defined for overriding the default SocketIO `json` interface"""
44 kwargs["cls"] = FiftyOneJSONEncoder
45 return json_util.dumps(
46 json_util.loads(
47 json_util.dumps(*args, **kwargs), parse_constant=lambda c: c
48 ),
49 **kwargs
50 )
51
52 @staticmethod
53 def loads(*args, **kwargs):
54 """Defined for overriding the default SocketIO `json` interface"""
55 return json_util.loads(*args, **kwargs)
56
[end of fiftyone/server/json_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py
--- a/fiftyone/server/json_util.py
+++ b/fiftyone/server/json_util.py
@@ -10,6 +10,16 @@
from fiftyone.core.sample import Sample, SampleView
from fiftyone.core.stages import ViewStage
+import fiftyone.core.utils as fou
+
+
+def _handle_bytes(o):
+ for k, v in o.items():
+ if isinstance(v, bytes):
+ o[k] = str(fou.deserialize_numpy_array(v).shape)
+ if isinstance(v, dict):
+ o[k] = _handle_bytes(v)
+ return o
class FiftyOneJSONEncoder(JSONEncoder):
@@ -29,7 +39,7 @@
str
"""
if isinstance(o, (Sample, SampleView)):
- return o.to_mongo_dict()
+ return _handle_bytes(o.to_mongo_dict())
if issubclass(type(o), ViewStage):
return o._serialize()
if isinstance(o, ObjectId):
| {"golden_diff": "diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py\n--- a/fiftyone/server/json_util.py\n+++ b/fiftyone/server/json_util.py\n@@ -10,6 +10,16 @@\n \n from fiftyone.core.sample import Sample, SampleView\n from fiftyone.core.stages import ViewStage\n+import fiftyone.core.utils as fou\n+\n+\n+def _handle_bytes(o):\n+ for k, v in o.items():\n+ if isinstance(v, bytes):\n+ o[k] = str(fou.deserialize_numpy_array(v).shape)\n+ if isinstance(v, dict):\n+ o[k] = _handle_bytes(v)\n+ return o\n \n \n class FiftyOneJSONEncoder(JSONEncoder):\n@@ -29,7 +39,7 @@\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n- return o.to_mongo_dict()\n+ return _handle_bytes(o.to_mongo_dict())\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n", "issue": "[BUG] numpy.array sample fields trigger server error when serialized\n### System information\r\n\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04\r\n- **FiftyOne installed from (pip or source)**: source\r\n- **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5\r\n- **Python version**: 3.6\r\n\r\n### Commands to reproduce\r\n\r\n1. Start server with `python fiftyone/server/main.py`\r\n2. Start app with `yarn dev`\r\n3. Run the code below\r\n\r\n### Describe the problem\r\n\r\nThe server fails to serialize the sample (see traceback) and the sample does not display in the app.\r\n\r\n\r\n### Code to reproduce issue\r\n```python\r\nimport fiftyone as fo\r\nimport numpy as np\r\ndataset = fo.Dataset()\r\ndataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3])))\r\nsession = fo.launch_app(remote=True, dataset=dataset)\r\n```\r\n\r\n### Other info / logs\r\n\r\nProbably introduced in #543, since that changed JSON encoding. 
Previously, this field was serialized as:\r\n```\r\n \"field\": {\r\n \"$binary\": \"eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=\",\r\n \"$type\": \"00\"\r\n }\r\n```\r\n\r\nServer traceback:\r\n```\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 696, in _handle_event_internal\r\n binary=binary))\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 607, in _send_packet\r\n encoded_packet = pkt.encode()\r\n File \".../lib/python3.6/site-packages/socketio/packet.py\", line 71, in encode\r\n encoded_packet += self.json.dumps(data, separators=(',', ':'))\r\n File \"/home/alan/code/fiftyone/fiftyone/server/json_util.py\", line 47, in dumps\r\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\r\n File \".../lib/python3.6/site-packages/bson/json_util.py\", line 383, in dumps\r\n return json.dumps(_json_convert(obj, json_options), *args, **kwargs)\r\n File \"/usr/lib/python3.6/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 275, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 357, in iterencode\r\n return _iterencode(o, 0)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte\r\n```\r\n\r\n### What areas of FiftyOne does this bug affect?\r\n\r\n- [ ] `App`: FiftyOne application issue\r\n- [ ] `Core`: Core `fiftyone` Python library issue\r\n- [x] `Server`: Fiftyone server issue\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom flask.json import JSONEncoder\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return o.to_mongo_dict()\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}]} | 1,829 | 233 |
gh_patches_debug_13692 | rasdani/github-patches | git_diff | pyca__cryptography-1992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
1.0 release
http://semver.org
> Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable.
Should we bump our version number all the way to 1.0 for the next release?
</issue>
<code>
[start of src/cryptography/__about__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 __all__ = [
8 "__title__", "__summary__", "__uri__", "__version__", "__author__",
9 "__email__", "__license__", "__copyright__",
10 ]
11
12 __title__ = "cryptography"
13 __summary__ = ("cryptography is a package which provides cryptographic recipes"
14 " and primitives to Python developers.")
15 __uri__ = "https://github.com/pyca/cryptography"
16
17 __version__ = "0.10.dev1"
18
19 __author__ = "The cryptography developers"
20 __email__ = "[email protected]"
21
22 __license__ = "BSD or Apache License, Version 2.0"
23 __copyright__ = "Copyright 2013-2015 {0}".format(__author__)
24
[end of src/cryptography/__about__.py]
[start of vectors/cryptography_vectors/__about__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 __all__ = [
8 "__title__", "__summary__", "__uri__", "__version__", "__author__",
9 "__email__", "__license__", "__copyright__",
10 ]
11
12 __title__ = "cryptography_vectors"
13 __summary__ = "Test vectors for the cryptography package."
14
15 __uri__ = "https://github.com/pyca/cryptography"
16
17 __version__ = "0.10.dev1"
18
19 __author__ = "The cryptography developers"
20 __email__ = "[email protected]"
21
22 __license__ = "BSD or Apache License, Version 2.0"
23 __copyright__ = "Copyright 2013-2015 %s" % __author__
24
[end of vectors/cryptography_vectors/__about__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py
--- a/src/cryptography/__about__.py
+++ b/src/cryptography/__about__.py
@@ -14,7 +14,7 @@
" and primitives to Python developers.")
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "0.10.dev1"
+__version__ = "1.0.dev1"
__author__ = "The cryptography developers"
__email__ = "[email protected]"
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py
--- a/vectors/cryptography_vectors/__about__.py
+++ b/vectors/cryptography_vectors/__about__.py
@@ -14,7 +14,7 @@
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "0.10.dev1"
+__version__ = "1.0.dev1"
__author__ = "The cryptography developers"
__email__ = "[email protected]"
| {"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -14,7 +14,7 @@\n \" and primitives to Python developers.\")\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -14,7 +14,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\n", "issue": "1.0 release\nhttp://semver.org\n\n> Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable.\n\nShould we bump our version number all the way to 1.0 for the next release?\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 {0}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]} | 1,130 | 246 |
gh_patches_debug_20125 | rasdani/github-patches | git_diff | rucio__rucio-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove unused file from common
Motivation
----------
lib/rucio/common/client.py is probably unused and can be removed
Modification
------------
</issue>
<code>
[start of lib/rucio/common/client.py]
1 # Copyright European Organization for Nuclear Research (CERN)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # You may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Authors:
8 # - Vincent Garonne, <[email protected]>, 2012
9
10 import httplib
11
12
13 class BaseClient(object):
14
15 """A base client class"""
16
17 DEFAULT_PORT = 80
18
19 OK_RESPONSE_CODES = (
20 httplib.OK,
21 httplib.CREATED,
22 httplib.ACCEPTED,
23 httplib.NO_CONTENT,
24 )
25
26 def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):
27 """
28 Creates a new client to some service.
29
30 :param host: The host where service resides
31 :param port: The port where service resides
32 :param use_ssl: Should we use HTTPS?
33 :param auth_tok: The auth token to pass to the server
34 :param creds: The credentials to pass to the auth plugin
35 """
36 self.host = host
37 self.port = port or self.DEFAULT_PORT
38 self.use_ssl = use_ssl
39 self.auth_tok = auth_tok
40 self.creds = creds or {}
41 self.connection = None
42
[end of lib/rucio/common/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py
deleted file mode 100644
--- a/lib/rucio/common/client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright European Organization for Nuclear Research (CERN)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# You may not use this file except in compliance with the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Authors:
-# - Vincent Garonne, <[email protected]>, 2012
-
-import httplib
-
-
-class BaseClient(object):
-
- """A base client class"""
-
- DEFAULT_PORT = 80
-
- OK_RESPONSE_CODES = (
- httplib.OK,
- httplib.CREATED,
- httplib.ACCEPTED,
- httplib.NO_CONTENT,
- )
-
- def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):
- """
- Creates a new client to some service.
-
- :param host: The host where service resides
- :param port: The port where service resides
- :param use_ssl: Should we use HTTPS?
- :param auth_tok: The auth token to pass to the server
- :param creds: The credentials to pass to the auth plugin
- """
- self.host = host
- self.port = port or self.DEFAULT_PORT
- self.use_ssl = use_ssl
- self.auth_tok = auth_tok
- self.creds = creds or {}
- self.connection = None
| {"golden_diff": "diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py\ndeleted file mode 100644\n--- a/lib/rucio/common/client.py\n+++ /dev/null\n@@ -1,41 +0,0 @@\n-# Copyright European Organization for Nuclear Research (CERN)\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# You may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Authors:\n-# - Vincent Garonne, <[email protected]>, 2012\n-\n-import httplib\n-\n-\n-class BaseClient(object):\n-\n- \"\"\"A base client class\"\"\"\n-\n- DEFAULT_PORT = 80\n-\n- OK_RESPONSE_CODES = (\n- httplib.OK,\n- httplib.CREATED,\n- httplib.ACCEPTED,\n- httplib.NO_CONTENT,\n- )\n-\n- def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n- \"\"\"\n- Creates a new client to some service.\n-\n- :param host: The host where service resides\n- :param port: The port where service resides\n- :param use_ssl: Should we use HTTPS?\n- :param auth_tok: The auth token to pass to the server\n- :param creds: The credentials to pass to the auth plugin\n- \"\"\"\n- self.host = host\n- self.port = port or self.DEFAULT_PORT\n- self.use_ssl = use_ssl\n- self.auth_tok = auth_tok\n- self.creds = creds or {}\n- self.connection = None\n", "issue": "remove unused file from common\nMotivation\r\n----------\r\nlib/rucio/common/client.py is probably unused and can be removed\r\n\r\nModification\r\n------------\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Vincent Garonne, <[email protected]>, 2012\n\nimport httplib\n\n\nclass BaseClient(object):\n\n \"\"\"A base client class\"\"\"\n\n DEFAULT_PORT = 80\n\n OK_RESPONSE_CODES = (\n httplib.OK,\n httplib.CREATED,\n httplib.ACCEPTED,\n httplib.NO_CONTENT,\n )\n\n def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n \"\"\"\n Creates a new client to some service.\n\n :param host: The host where service resides\n :param port: The port where service resides\n :param use_ssl: Should we use HTTPS?\n :param auth_tok: The auth token to pass to the server\n :param creds: The credentials to pass to the auth plugin\n \"\"\"\n self.host = host\n self.port = port or self.DEFAULT_PORT\n self.use_ssl = use_ssl\n self.auth_tok = auth_tok\n self.creds = creds or {}\n self.connection = None\n", "path": "lib/rucio/common/client.py"}]} | 943 | 387 |
gh_patches_debug_29675 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Script to build each component from manifest and assemble bundle.
This script should read a manifest and output all artifacts ready for upload.
Example.
/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle
/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle
/build/opensearch-sql-1.0.0.0-rc1.zip
/build/org/opensearch... <- maven artifacts
Input: to the script should be a manifest file location - format defined here #111
Output: all required artifacts are written to ./build
- [x] Clone each component repository defined in the manifest
- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.
- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134
To make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. All artifacts will be under org/opensearch.
</issue>
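Editor's note: the `Component` class shown in the code below already covers checkout/build/export; what the issue asks for on top is a driver that walks a manifest and collects everything under `./build`. A hypothetical sketch of such a driver — the manifest keys, the YAML format, and the entry point are assumptions based on the issue text (#111), not the repository's actual interface:

```python
import os
import yaml  # assumed: the manifest format defined in #111 is YAML

from lib.component import Component  # the helper class shown in the record below

def build_bundle(manifest_path, arch="x64", output_dir="build"):
    """Read a bundle manifest and build/export every component into ./build."""
    with open(manifest_path) as f:
        manifest = yaml.safe_load(f)

    version = manifest["build"]["version"]     # assumed manifest layout
    os.makedirs(output_dir, exist_ok=True)
    dest = os.path.realpath(output_dir)

    for entry in manifest["components"]:
        component = Component(entry)           # name / repository / ref
        component.checkout()                   # clone the pinned ref
        component.build(version, arch)         # plugin zips + maven artifacts -> ./artifacts
        component.export(dest)                 # copy artifacts under ./build

# build_bundle("manifests/opensearch-1.0.0.yml")  # example invocation
```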
<code>
[start of tools/bundle-build/lib/component.py]
1 import os
2 import tempfile
3 import subprocess
4 from lib.git import GitRepository
5
6 class Component:
7 def __init__(self, data):
8 self._name = data['name']
9 self._repository = data['repository']
10 self._ref = data['ref']
11
12 def name(self):
13 return self._name
14
15 def repository(self):
16 return self._repository
17
18 def git_repository(self):
19 return self._git_repository
20
21 def ref(self):
22 return self._ref
23
24 def checkout(self):
25 self._git_repository = GitRepository(self.repository(), self.ref())
26
27 # script overridden in this repo
28 def custom_component_script_path(self):
29 dirname = os.path.dirname(os.path.abspath(__file__))
30 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))
31
32 # script inside the component repo
33 def component_script_path(self):
34 dirname = self.git_repository().dir()
35 return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))
36
37 # default gradle script
38 def default_script_path(self):
39 dirname = os.path.dirname(os.path.abspath(__file__))
40 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))
41
42 def build_script(self):
43 paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]
44 return next(filter(lambda path: os.path.exists(path), paths), None)
45
46 def build(self, version, arch):
47 build_script = f'{self.build_script()} {version} {arch}'
48 print(f'Running {build_script} ...')
49 self.git_repository().execute(build_script)
50
51 def artifacts_path(self):
52 dirname = self.git_repository().dir()
53 return os.path.realpath(os.path.join(dirname, 'artifacts'))
54
55 def export(self, dest):
56 artifacts_path = self.artifacts_path()
57 if os.path.exists(artifacts_path):
58 print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
59 self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
60 else:
61 print(f'No artifacts found in {artifacts_path}, skipping.')
62
63 def dict(self):
64 return {
65 'name': self.name(),
66 'repository': self.repository(),
67 'ref': self.ref(),
68 'sha': self.git_repository().sha()
69 }
70
[end of tools/bundle-build/lib/component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py
--- a/tools/bundle-build/lib/component.py
+++ b/tools/bundle-build/lib/component.py
@@ -21,6 +21,9 @@
def ref(self):
return self._ref
+ def artifacts(self):
+ return self._artifacts
+
def checkout(self):
self._git_repository = GitRepository(self.repository(), self.ref())
@@ -57,13 +60,29 @@
if os.path.exists(artifacts_path):
print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
+ self.set_artifacts()
else:
print(f'No artifacts found in {artifacts_path}, skipping.')
+ def set_artifacts(self):
+ self._artifacts = {key: self.file_paths(key) for key in ["maven", "plugins", "bundle", "libs"] if self.file_paths(key)}
+
+ def file_paths(self, dir_name):
+ artifacts_path = self.artifacts_path()
+ sub_dir = os.path.join(artifacts_path, dir_name)
+ file_paths = []
+ if os.path.exists(sub_dir):
+ for dir, dirs, files in os.walk(sub_dir):
+ for file_name in files:
+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)
+ file_paths.append(path)
+ return file_paths
+
def dict(self):
return {
'name': self.name(),
'repository': self.repository(),
'ref': self.ref(),
- 'sha': self.git_repository().sha()
+ 'sha': self.git_repository().sha(),
+ 'artifacts': self.artifacts()
}
| {"golden_diff": "diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py\n--- a/tools/bundle-build/lib/component.py\n+++ b/tools/bundle-build/lib/component.py\n@@ -21,6 +21,9 @@\n def ref(self):\n return self._ref\n \n+ def artifacts(self):\n+ return self._artifacts\n+\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n \n@@ -57,13 +60,29 @@\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n+ self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n \n+ def set_artifacts(self):\n+ self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n+\n+ def file_paths(self, dir_name):\n+ artifacts_path = self.artifacts_path()\n+ sub_dir = os.path.join(artifacts_path, dir_name)\n+ file_paths = []\n+ if os.path.exists(sub_dir):\n+ for dir, dirs, files in os.walk(sub_dir):\n+ for file_name in files:\n+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n+ file_paths.append(path)\n+ return file_paths\n+\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n- 'sha': self.git_repository().sha()\n+ 'sha': self.git_repository().sha(),\n+ 'artifacts': self.artifacts()\n }\n", "issue": "Script to build each component from manifest and assemble bundle.\nThis script should read a manifest and output all artifacts ready for upload.\r\nExample.\r\n/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle\r\n/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle\r\n/build/opensearch-sql-1.0.0.0-rc1.zip\r\n/build/org/opensearch... <- maven artifacts\r\n\r\nInput: to the script should be a manifest file location - format defined here #111 \r\nOutput: all required artifacts are written to ./build\r\n\r\n- [x] Clone each component repository defined in the manifest\r\n- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.\r\n- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134 \r\n\r\nTo make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. 
All artifacts will be under org/opensearch.\r\n\n", "before_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha()\n }\n", "path": "tools/bundle-build/lib/component.py"}]} | 1,490 | 404 |
gh_patches_debug_7646 | rasdani/github-patches | git_diff | lnbits__lnbits-194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LNURLp links give errors on WalletofSatoshi and BlueWallet
Using this LNURLp link: https://lnbits.com/lnurlp/212
BlueWallet Error: "Alert: Bad response from server"
Wallet of Satoshi Error: "Error: Could not complete payment, please try again."
</issue>
<code>
[start of lnbits/extensions/lnurlp/views_api.py]
1 from quart import g, jsonify, request
2 from http import HTTPStatus
3 from lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore
4
5 from lnbits.core.crud import get_user
6 from lnbits.decorators import api_check_wallet_key, api_validate_post_request
7 from lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis
8
9 from . import lnurlp_ext
10 from .crud import (
11 create_pay_link,
12 get_pay_link,
13 get_pay_links,
14 update_pay_link,
15 delete_pay_link,
16 )
17
18
19 @lnurlp_ext.route("/api/v1/currencies", methods=["GET"])
20 async def api_list_currencies_available():
21 return jsonify(list(currencies.keys()))
22
23
24 @lnurlp_ext.route("/api/v1/links", methods=["GET"])
25 @api_check_wallet_key("invoice")
26 async def api_links():
27 wallet_ids = [g.wallet.id]
28
29 if "all_wallets" in request.args:
30 wallet_ids = (await get_user(g.wallet.user)).wallet_ids
31
32 try:
33 return (
34 jsonify(
35 [
36 {**link._asdict(), **{"lnurl": link.lnurl}}
37 for link in await get_pay_links(wallet_ids)
38 ]
39 ),
40 HTTPStatus.OK,
41 )
42 except LnurlInvalidUrl:
43 return (
44 jsonify(
45 {
46 "message": "LNURLs need to be delivered over a publically accessible `https` domain or Tor."
47 }
48 ),
49 HTTPStatus.UPGRADE_REQUIRED,
50 )
51
52
53 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["GET"])
54 @api_check_wallet_key("invoice")
55 async def api_link_retrieve(link_id):
56 link = await get_pay_link(link_id)
57
58 if not link:
59 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
60
61 if link.wallet != g.wallet.id:
62 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
63
64 return jsonify({**link._asdict(), **{"lnurl": link.lnurl}}), HTTPStatus.OK
65
66
67 @lnurlp_ext.route("/api/v1/links", methods=["POST"])
68 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["PUT"])
69 @api_check_wallet_key("invoice")
70 @api_validate_post_request(
71 schema={
72 "description": {"type": "string", "empty": False, "required": True},
73 "min": {"type": "number", "min": 0.01, "required": True},
74 "max": {"type": "number", "min": 0.01, "required": True},
75 "currency": {"type": "string", "nullable": True, "required": False},
76 "comment_chars": {"type": "integer", "required": True, "min": 0, "max": 800},
77 "webhook_url": {"type": "string", "required": False},
78 "success_text": {"type": "string", "required": False},
79 "success_url": {"type": "string", "required": False},
80 }
81 )
82 async def api_link_create_or_update(link_id=None):
83 if g.data["min"] > g.data["max"]:
84 return jsonify({"message": "Min is greater than max."}), HTTPStatus.BAD_REQUEST
85
86 if g.data.get("currency") == None and (
87 round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
88 ):
89 return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
90
91 if link_id:
92 link = await get_pay_link(link_id)
93
94 if not link:
95 return (
96 jsonify({"message": "Pay link does not exist."}),
97 HTTPStatus.NOT_FOUND,
98 )
99
100 if link.wallet != g.wallet.id:
101 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
102
103 link = await update_pay_link(link_id, **g.data)
104 else:
105 link = await create_pay_link(wallet_id=g.wallet.id, **g.data)
106
107 return (
108 jsonify({**link._asdict(), **{"lnurl": link.lnurl}}),
109 HTTPStatus.OK if link_id else HTTPStatus.CREATED,
110 )
111
112
113 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["DELETE"])
114 @api_check_wallet_key("invoice")
115 async def api_link_delete(link_id):
116 link = await get_pay_link(link_id)
117
118 if not link:
119 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
120
121 if link.wallet != g.wallet.id:
122 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
123
124 await delete_pay_link(link_id)
125
126 return "", HTTPStatus.NO_CONTENT
127
128
129 @lnurlp_ext.route("/api/v1/rate/<currency>", methods=["GET"])
130 async def api_check_fiat_rate(currency):
131 try:
132 rate = await get_fiat_rate_satoshis(currency)
133 except AssertionError:
134 rate = None
135
136 return jsonify({"rate": rate}), HTTPStatus.OK
137
[end of lnbits/extensions/lnurlp/views_api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py
--- a/lnbits/extensions/lnurlp/views_api.py
+++ b/lnbits/extensions/lnurlp/views_api.py
@@ -87,6 +87,9 @@
round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
):
return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
+
+ if g.data["success_url"][:8] != "https://":
+ return jsonify({"message": "Success URL must be secure https://..."}), HTTPStatus.BAD_REQUEST
if link_id:
link = await get_pay_link(link_id)
| {"golden_diff": "diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py\n--- a/lnbits/extensions/lnurlp/views_api.py\n+++ b/lnbits/extensions/lnurlp/views_api.py\n@@ -87,6 +87,9 @@\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n+ \n+ if g.data[\"success_url\"][:8] != \"https://\":\n+ return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n \n if link_id:\n link = await get_pay_link(link_id)\n", "issue": "LNURLp links give errors on WalletofSatoshi and BlueWallet\nUsing this LNURLp link: https://lnbits.com/lnurlp/212\r\n\r\nBlueWallet Error: \"Alert: Bad response from server\"\r\nWallet of Satoshi Error: \"Error: Could not complete payment, please try again.\"\n", "before_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": 
\"Min is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}]} | 2,035 | 174 |
gh_patches_debug_20567 | rasdani/github-patches | git_diff | pantsbuild__pants-13467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pants package does not build missing docker images if previous build was cached.
**Describe the bug**
Pants' caching of build targets does not take into consideration that the final build artifact may no longer exist.
Take this example: https://www.pantsbuild.org/v2.8/docs/docker#example
```
$ ./pants package src/docker/hw/Dockerfile
[...]
18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex
18:07:31.83 [INFO] Completed: Building docker image helloworld:latest
18:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
helloworld latest abcdefabcdef 6 seconds ago 420MB
$ docker rmi helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
$ ./pants package src/docker/hw/Dockerfile
19:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
```
If you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.
**Pants version**
2.8rc1
**OS**
Linux
</issue>
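To make the caching gap concrete, here is a toy illustration in plain Python (not Pants internals): a cache keyed only on the build inputs cannot notice that the externally produced artifact (the docker image) was removed after the fact.

```python
cache: dict[str, str] = {}

def build_image(inputs: str) -> str:
    if inputs in cache:
        # Cache hit: the stored result is returned even if the image was
        # deleted with `docker rmi` in the meantime.
        return cache[inputs]
    result = f"built image for {inputs}"  # stand-in for running `docker build`
    cache[inputs] = result
    return result

print(build_image("src/docker/hw/Dockerfile"))  # first call: builds
print(build_image("src/docker/hw/Dockerfile"))  # second call: cache hit, no rebuild
```

The golden diff in this record addresses this by giving the docker build and push processes `ProcessCacheScope.PER_SESSION`, so they re-run in every new session instead of relying on the persistent cache.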
<code>
[start of src/python/pants/backend/docker/util_rules/docker_binary.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Mapping
8
9 from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
10 from pants.engine.fs import Digest
11 from pants.engine.process import (
12 BinaryNotFoundError,
13 BinaryPath,
14 BinaryPathRequest,
15 BinaryPaths,
16 BinaryPathTest,
17 Process,
18 SearchPath,
19 )
20 from pants.engine.rules import Get, collect_rules, rule
21 from pants.util.logging import LogLevel
22 from pants.util.strutil import pluralize
23
24
25 class DockerBinary(BinaryPath):
26 """The `docker` binary."""
27
28 DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
29
30 def build_image(
31 self,
32 tags: tuple[str, ...],
33 digest: Digest,
34 dockerfile: str | None = None,
35 build_args: DockerBuildArgs | None = None,
36 env: Mapping[str, str] | None = None,
37 ) -> Process:
38 args = [self.path, "build"]
39
40 for tag in tags:
41 args.extend(["-t", tag])
42
43 if build_args:
44 for build_arg in build_args:
45 args.extend(["--build-arg", build_arg])
46
47 if dockerfile:
48 args.extend(["-f", dockerfile])
49
50 # Add build context root.
51 args.append(".")
52
53 return Process(
54 argv=tuple(args),
55 description=(
56 f"Building docker image {tags[0]}"
57 + (f" +{pluralize(len(tags)-1, 'additional tag')}." if len(tags) > 1 else ".")
58 ),
59 env=env,
60 input_digest=digest,
61 )
62
63 def push_image(self, tags: tuple[str, ...]) -> Process | None:
64 if not tags:
65 return None
66
67 return Process(
68 argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
69 )
70
71
72 @dataclass(frozen=True)
73 class DockerBinaryRequest:
74 search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH
75
76
77 @rule(desc="Finding the `docker` binary", level=LogLevel.DEBUG)
78 async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:
79 request = BinaryPathRequest(
80 binary_name="docker",
81 search_path=docker_request.search_path,
82 test=BinaryPathTest(args=["-v"]),
83 )
84 paths = await Get(BinaryPaths, BinaryPathRequest, request)
85 first_path = paths.first_path
86 if not first_path:
87 raise BinaryNotFoundError.from_request(request, rationale="interact with the docker daemon")
88 return DockerBinary(first_path.path, first_path.fingerprint)
89
90
91 @rule
92 async def get_docker() -> DockerBinary:
93 return await Get(DockerBinary, DockerBinaryRequest())
94
95
96 def rules():
97 return collect_rules()
98
[end of src/python/pants/backend/docker/util_rules/docker_binary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py
--- a/src/python/pants/backend/docker/util_rules/docker_binary.py
+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py
@@ -15,6 +15,7 @@
BinaryPaths,
BinaryPathTest,
Process,
+ ProcessCacheScope,
SearchPath,
)
from pants.engine.rules import Get, collect_rules, rule
@@ -58,6 +59,7 @@
),
env=env,
input_digest=digest,
+ cache_scope=ProcessCacheScope.PER_SESSION,
)
def push_image(self, tags: tuple[str, ...]) -> Process | None:
@@ -65,7 +67,9 @@
return None
return Process(
- argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
+ argv=(self.path, "push", *tags),
+ cache_scope=ProcessCacheScope.PER_SESSION,
+ description=f"Pushing docker image {tags[0]}",
)
| {"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py\n--- a/src/python/pants/backend/docker/util_rules/docker_binary.py\n+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py\n@@ -15,6 +15,7 @@\n BinaryPaths,\n BinaryPathTest,\n Process,\n+ ProcessCacheScope,\n SearchPath,\n )\n from pants.engine.rules import Get, collect_rules, rule\n@@ -58,6 +59,7 @@\n ),\n env=env,\n input_digest=digest,\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n )\n \n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n@@ -65,7 +67,9 @@\n return None\n \n return Process(\n- argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n+ argv=(self.path, \"push\", *tags),\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n+ description=f\"Pushing docker image {tags[0]}\",\n )\n", "issue": "pants package does not build missing docker images if previous build was cached.\n**Describe the bug**\r\nPant's caching of build targets does not take into consideration that the final target does not exist.\r\n\r\nTake this example: https://www.pantsbuild.org/v2.8/docs/docker#example\r\n\r\n```\r\n$ ./pants package src/docker/hw/Dockerfile\r\n[...]\r\n18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex\r\n18:07:31.83 [INFO] Completed: Building docker image helloworld:latest\r\n18:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\nhelloworld latest abcdefabcdef 6 seconds ago 420MB\r\n\r\n$ docker rmi helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n\r\n$ ./pants package src/docker/hw/Dockerfile\r\n19:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n```\r\nIf you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.\r\n\r\n**Pants version**\r\n2.8rc1\r\n\r\n**OS**\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if 
len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}]} | 1,683 | 246 |
gh_patches_debug_23656 | rasdani/github-patches | git_diff | OpenMined__PySyft-4991 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Does the framework support IPv6 networks?
Is this framework suitable for IPv6 network environment?
</issue>
<code>
[start of src/syft/grid/example_nodes/network.py]
1 """
2 The purpose of this application is to allow us to dev and test PySyft
3 functionality on an actual local network. This is NOT meant to be run in
4 production (that's the *actual* grid's job).
5
6 For example:
7 $ python src/syft/grid/example_nodes/network.py
8
9 """
10 # stdlib
11 import os
12
13 # third party
14 import flask
15 from flask import Flask
16 from flask import Response
17 from nacl.encoding import HexEncoder
18
19 # syft absolute
20 from syft.core.common.message import SignedImmediateSyftMessageWithReply
21 from syft.core.common.message import SignedImmediateSyftMessageWithoutReply
22 from syft.core.common.serde.deserialize import _deserialize
23 from syft.core.node.network.network import Network
24 from syft.grid.services.signaling_service import PullSignalingService
25 from syft.grid.services.signaling_service import PushSignalingService
26 from syft.grid.services.signaling_service import RegisterDuetPeerService
27
28 app = Flask(__name__)
29
30 network = Network(name="om-net")
31
32 network.immediate_services_without_reply.append(PushSignalingService)
33 network.immediate_services_with_reply.append(PullSignalingService)
34 network.immediate_services_with_reply.append(RegisterDuetPeerService)
35 network._register_services() # re-register all services including SignalingService
36
37
38 @app.route("/metadata")
39 def get_metadata() -> flask.Response:
40 metadata = network.get_metadata_for_client()
41 metadata_proto = metadata.serialize()
42 r = Response(
43 response=metadata_proto.SerializeToString(),
44 status=200,
45 )
46 r.headers["Content-Type"] = "application/octet-stream"
47 return r
48
49
50 @app.route("/", methods=["POST"])
51 def process_network_msgs() -> flask.Response:
52 data = flask.request.get_data()
53 obj_msg = _deserialize(blob=data, from_bytes=True)
54 if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):
55 print(
56 f"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch"
57 )
58 reply = network.recv_immediate_msg_with_reply(msg=obj_msg)
59 r = Response(response=reply.serialize(to_bytes=True), status=200)
60 r.headers["Content-Type"] = "application/octet-stream"
61 return r
62 elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):
63 print(
64 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
65 )
66 network.recv_immediate_msg_without_reply(msg=obj_msg)
67 r = Response(status=200)
68 return r
69 else:
70 print(
71 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
72 )
73 network.recv_eventual_msg_without_reply(msg=obj_msg)
74 r = Response(status=200)
75 return r
76
77
78 def run() -> None:
79 global network
80 print("====================================")
81 print("========== NODE ROOT KEY ===========")
82 print("====================================")
83 # this signing_key is to aid in local development and is not used in the real
84 # PyGrid implementation
85 PORT = os.getenv("PORT", 5000)
86 print(f"Starting Node on PORT: {PORT}")
87 print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
88 app.run(host="0.0.0.0", port=int(PORT)) # nosec
89
90
91 run()
92
[end of src/syft/grid/example_nodes/network.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py
--- a/src/syft/grid/example_nodes/network.py
+++ b/src/syft/grid/example_nodes/network.py
@@ -9,6 +9,7 @@
"""
# stdlib
import os
+import sys
# third party
import flask
@@ -77,15 +78,25 @@
def run() -> None:
global network
- print("====================================")
- print("========== NODE ROOT KEY ===========")
- print("====================================")
+
+ IP_MODE = os.getenv("IP_MODE", "IPV4") # default to ipv4
+ if len(sys.argv) > 1:
+ IP_MODE = sys.argv[1]
+
+ IP_MODE = "IPV6" if IP_MODE == "IPV6" else "IPV4"
# this signing_key is to aid in local development and is not used in the real
# PyGrid implementation
+ HOST = "0.0.0.0" if IP_MODE == "IPV4" else "::" # nosec
PORT = os.getenv("PORT", 5000)
- print(f"Starting Node on PORT: {PORT}")
+
+ print("====================================")
+ print("========== NODE ROOT KEY ===========")
+ print("====================================")
print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
- app.run(host="0.0.0.0", port=int(PORT)) # nosec
+
+ print(f"Using {IP_MODE} and listening on port {PORT}")
+
+ app.run(host=HOST, port=int(PORT))
run()
| {"golden_diff": "diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py\n--- a/src/syft/grid/example_nodes/network.py\n+++ b/src/syft/grid/example_nodes/network.py\n@@ -9,6 +9,7 @@\n \"\"\"\n # stdlib\n import os\n+import sys\n \n # third party\n import flask\n@@ -77,15 +78,25 @@\n \n def run() -> None:\n global network\n- print(\"====================================\")\n- print(\"========== NODE ROOT KEY ===========\")\n- print(\"====================================\")\n+\n+ IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n+ if len(sys.argv) > 1:\n+ IP_MODE = sys.argv[1]\n+\n+ IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n+ HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n- print(f\"Starting Node on PORT: {PORT}\")\n+\n+ print(\"====================================\")\n+ print(\"========== NODE ROOT KEY ===========\")\n+ print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n- app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n+\n+ print(f\"Using {IP_MODE} and listening on port {PORT}\")\n+\n+ app.run(host=HOST, port=int(PORT))\n \n \n run()\n", "issue": "Does the framework support IPv6 networks?\nIs this framework suitable for IPv6 network environment? \n", "before_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), 
status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n PORT = os.getenv(\"PORT\", 5000)\n print(f\"Starting Node on PORT: {PORT}\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}]} | 1,461 | 385 |
gh_patches_debug_19544 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🪲 [CI] MyPy test failure
### Thank you for taking the time to report a problem with Opteryx.
_To help us respond to your request, we ask that you try to provide the detail below about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
</issue>
<code>
[start of opteryx/planner/views/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import orjson
14
15 from opteryx.managers.expression import NodeType
16 from opteryx.third_party.travers import Graph
17
18
19 def _load_views():
20 try:
21 with open("views.json", "rb") as defs:
22 return orjson.loads(defs.read())
23 except Exception as err:
24 print(f"[OPTERYX] Unable to open views definition file. {err}")
25 return {}
26
27
28 VIEWS = _load_views()
29
30
31 def is_view(view_name: str) -> bool:
32 return view_name in VIEWS
33
34
35 def view_as_plan(view_name: str) -> Graph:
36 from opteryx.planner.logical_planner import do_logical_planning_phase
37 from opteryx.third_party import sqloxide
38 from opteryx.utils.sql import clean_statement
39 from opteryx.utils.sql import remove_comments
40
41 operation = VIEWS.get(view_name)["statement"]
42
43 clean_sql = clean_statement(remove_comments(operation))
44 parsed_statements = sqloxide.parse_sql(clean_sql, dialect="mysql")
45 logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))
46
47 return logical_plan
48
[end of opteryx/planner/views/__init__.py]
[start of opteryx/__version__.py]
1 __build__ = 522
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 16
31 _revision = 0
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
[end of opteryx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 522
+__build__ = 523
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py
--- a/opteryx/planner/views/__init__.py
+++ b/opteryx/planner/views/__init__.py
@@ -12,8 +12,7 @@
import orjson
-from opteryx.managers.expression import NodeType
-from opteryx.third_party.travers import Graph
+from opteryx.planner.logical_planner import LogicalPlan
def _load_views():
@@ -32,7 +31,7 @@
return view_name in VIEWS
-def view_as_plan(view_name: str) -> Graph:
+def view_as_plan(view_name: str) -> LogicalPlan:
from opteryx.planner.logical_planner import do_logical_planning_phase
from opteryx.third_party import sqloxide
from opteryx.utils.sql import clean_statement
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 522\n+__build__ = 523\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py\n--- a/opteryx/planner/views/__init__.py\n+++ b/opteryx/planner/views/__init__.py\n@@ -12,8 +12,7 @@\n \n import orjson\n \n-from opteryx.managers.expression import NodeType\n-from opteryx.third_party.travers import Graph\n+from opteryx.planner.logical_planner import LogicalPlan\n \n \n def _load_views():\n@@ -32,7 +31,7 @@\n return view_name in VIEWS\n \n \n-def view_as_plan(view_name: str) -> Graph:\n+def view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n", "issue": "\ud83e\udeb2 [CI] MyPy test failure\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.travers import Graph\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> Graph:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 522\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 1,536 | 301 |
gh_patches_debug_37940 | rasdani/github-patches | git_diff | deepset-ai__haystack-6753 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat: Add split by `page` to `DocumentSplitter`
**Is your feature request related to a problem? Please describe.**
There are some cases where we would like to be able to split the contents of a PDF by page, either to keep all text from a single page as one document to help preserve context, or to perform two passes of chunking (i.e. split by page, followed by split by sentence). I would not say this is a common setup, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility.
**Describe the solution you'd like**
Add a new `split_by` value of `page` that would split on `"\f"`.
**Describe alternatives you've considered**
Split up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` handle this.
</issue>
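A minimal sketch of the proposed behaviour, assuming pages are delimited by form feed characters (`\f`) as suggested in the issue; the delimiter is re-attached to every unit except the last, mirroring what the existing `DocumentSplitter._split_into_units` does for words and sentences:

```python
from typing import List

def split_by_page(text: str) -> List[str]:
    """Split on form feeds, keeping the delimiter on every unit except the last."""
    units = text.split("\f")
    for i in range(len(units) - 1):
        units[i] += "\f"
    return units

print(split_by_page("page one\fpage two\fpage three"))
# ['page one\x0c', 'page two\x0c', 'page three']
```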
<code>
[start of haystack/components/preprocessors/document_splitter.py]
1 from copy import deepcopy
2 from typing import List, Literal
3
4 from more_itertools import windowed
5
6 from haystack import component, Document
7
8
9 @component
10 class DocumentSplitter:
11 """
12 Splits a list of text documents into a list of text documents with shorter texts.
13 This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.
14 """
15
16 def __init__(
17 self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
18 ):
19 """
20 :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
21 "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
22 :param split_length: The maximum number of units in each split.
23 :param split_overlap: The number of units that each split should overlap.
24 """
25
26 self.split_by = split_by
27 if split_by not in ["word", "sentence", "passage"]:
28 raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
29 if split_length <= 0:
30 raise ValueError("split_length must be greater than 0.")
31 self.split_length = split_length
32 if split_overlap < 0:
33 raise ValueError("split_overlap must be greater than or equal to 0.")
34 self.split_overlap = split_overlap
35
36 @component.output_types(documents=List[Document])
37 def run(self, documents: List[Document]):
38 """
39 Splits the documents by split_by after split_length units with an overlap of split_overlap units.
40 Returns a list of documents with the split texts.
41 A metadata field "source_id" is added to each document to keep track of the original document that was split.
42 Other metadata are copied from the original document.
43 :param documents: The documents to split.
44 :return: A list of documents with the split texts.
45 """
46
47 if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
48 raise TypeError("DocumentSplitter expects a List of Documents as input.")
49
50 split_docs = []
51 for doc in documents:
52 if doc.content is None:
53 raise ValueError(
54 f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None."
55 )
56 units = self._split_into_units(doc.content, self.split_by)
57 text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
58 metadata = deepcopy(doc.meta)
59 metadata["source_id"] = doc.id
60 split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
61 return {"documents": split_docs}
62
63 def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
64 if split_by == "passage":
65 split_at = "\n\n"
66 elif split_by == "sentence":
67 split_at = "."
68 elif split_by == "word":
69 split_at = " "
70 else:
71 raise NotImplementedError(
72 "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
73 )
74 units = text.split(split_at)
75 # Add the delimiter back to all units except the last one
76 for i in range(len(units) - 1):
77 units[i] += split_at
78 return units
79
80 def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:
81 """
82 Concatenates the elements into parts of split_length units.
83 """
84 text_splits = []
85 segments = windowed(elements, n=split_length, step=split_length - split_overlap)
86 for seg in segments:
87 current_units = [unit for unit in seg if unit is not None]
88 txt = "".join(current_units)
89 if len(txt) > 0:
90 text_splits.append(txt)
91 return text_splits
92
[end of haystack/components/preprocessors/document_splitter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py
--- a/haystack/components/preprocessors/document_splitter.py
+++ b/haystack/components/preprocessors/document_splitter.py
@@ -14,18 +14,21 @@
"""
def __init__(
- self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
+ self,
+ split_by: Literal["word", "sentence", "page", "passage"] = "word",
+ split_length: int = 200,
+ split_overlap: int = 0,
):
"""
:param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
- "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
+ "sentence" for splitting by ".", "page" for splitting by "\f" or "passage" for splitting by "\\n\\n".
:param split_length: The maximum number of units in each split.
:param split_overlap: The number of units that each split should overlap.
"""
self.split_by = split_by
- if split_by not in ["word", "sentence", "passage"]:
- raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
+ if split_by not in ["word", "sentence", "page", "passage"]:
+ raise ValueError("split_by must be one of 'word', 'sentence', 'page' or 'passage'.")
if split_length <= 0:
raise ValueError("split_length must be greater than 0.")
self.split_length = split_length
@@ -60,8 +63,10 @@
split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
return {"documents": split_docs}
- def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
- if split_by == "passage":
+ def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]:
+ if split_by == "page":
+ split_at = "\f"
+ elif split_by == "passage":
split_at = "\n\n"
elif split_by == "sentence":
split_at = "."
@@ -69,7 +74,7 @@
split_at = " "
else:
raise NotImplementedError(
- "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
+ "DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options."
)
units = text.split(split_at)
# Add the delimiter back to all units except the last one
| {"golden_diff": "diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py\n--- a/haystack/components/preprocessors/document_splitter.py\n+++ b/haystack/components/preprocessors/document_splitter.py\n@@ -14,18 +14,21 @@\n \"\"\"\n \n def __init__(\n- self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n+ self,\n+ split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n+ split_length: int = 200,\n+ split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n- \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n+ \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n \n self.split_by = split_by\n- if split_by not in [\"word\", \"sentence\", \"passage\"]:\n- raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n+ if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n+ raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n@@ -60,8 +63,10 @@\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n \n- def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n- if split_by == \"passage\":\n+ def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n+ if split_by == \"page\":\n+ split_at = \"\\f\"\n+ elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n@@ -69,7 +74,7 @@\n split_at = \" \"\n else:\n raise NotImplementedError(\n- \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n+ \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n", "issue": "feat: Add split by `page` to `DocumentSplitter`\n**Is your feature request related to a problem? Please describe.**\r\nThere are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context or to be able to perform two sets of chunking (i.e. split by page, followed up by split by sentence). I would not say this is a common set up, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility. \r\n\r\n**Describe the solution you'd like**\r\nAdd a new `split_by` value of `page` that would split on `\"\\f\"`.\r\n\r\n**Describe alternatives you've considered**\r\nSplit up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this. 
\r\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n if split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is 
not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}]} | 1,800 | 668 |
gh_patches_debug_1503 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-11075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build: support Ruby under `build.tools`
We should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.
Work required:
- [x] Update the documentation
- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images
- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)
- [x] Update `settings.py` to add this tool and version
- [x] Update config v2 to accept this value
- [x] Create a branch on `test-builds` for this use case
> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462
</issue>
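For the `settings.py` item in the checklist above, the likely shape of the change is one more entry in the tools mapping of `RTD_DOCKER_BUILD_SETTINGS` (shown in the listing below). A minimal sketch; the concrete version numbers are assumptions and would have to match what asdf-ruby can actually install and what gets compiled and uploaded to S3:

```python
# Sketch of the new tool entry, to be merged under
# RTD_DOCKER_BUILD_SETTINGS["tools"] in readthedocs/builds/constants_docker.py.
# Keys are the user-facing versions accepted in .readthedocs.yaml, values are
# the full asdf versions; "3.3" -> "3.3.0" is illustrative only.
ruby_versions = {
    "3.3": "3.3.0",
}
```

As with the other tools, adding the entry here only exposes the option; the cached build of that exact version still has to exist in the build images for it to work.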
<code>
[start of readthedocs/builds/constants_docker.py]
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # Adding a new tool/version to this setting requires:
13 #
14 # - a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``)
16 #
17 # - running the script ``./scripts/compile_version_upload.sh`` in
18 # development and production environments to compile and cache the new
19 # tool/version
20 #
21 # Note that when updating this options, you should also update the file:
22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.18",
36 "3.9": "3.9.18",
37 "3.10": "3.10.13",
38 "3.11": "3.11.6",
39 "3.12": "3.12.0",
40 # Always point to the latest stable release.
41 "3": "3.12.0",
42 "miniconda3-4.7": "miniconda3-4.7.12",
43 "mambaforge-4.10": "mambaforge-4.10.3-10",
44 "mambaforge-22.9": "mambaforge-22.9.0-3",
45 },
46 "nodejs": {
47 "14": "14.20.1",
48 "16": "16.18.1",
49 "18": "18.16.1", # LTS
50 "19": "19.0.1",
51 "20": "20.3.1",
52 },
53 "rust": {
54 "1.55": "1.55.0",
55 "1.61": "1.61.0",
56 "1.64": "1.64.0",
57 "1.70": "1.70.0",
58 },
59 "golang": {
60 "1.17": "1.17.13",
61 "1.18": "1.18.10",
62 "1.19": "1.19.10",
63 "1.20": "1.20.5",
64 },
65 },
66 }
67
[end of readthedocs/builds/constants_docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -50,6 +50,9 @@
"19": "19.0.1",
"20": "20.3.1",
},
+ "ruby": {
+ "3.3": "3.3.0",
+ },
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
| {"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -50,6 +50,9 @@\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n+ \"ruby\": {\n+ \"3.3\": \"3.3.0\",\n+ },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n", "issue": "Build: support Ruby under `build.tools` \nWe should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.\r\n\r\nWork required:\r\n\r\n- [x] Update the documentation\r\n- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images\r\n- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)\r\n- [x] Update `settings.py` to add this tool and version\r\n- [x] Update config v2 to accept this value\r\n- [x] Create a branch on `test-builds` for this use case\r\n\r\n\r\n> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]} | 1,628 | 147 |
gh_patches_debug_58664 | rasdani/github-patches | git_diff | jazzband__pip-tools-12 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Python versions lower than 2.7, too
</issue>
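Supporting interpreters older than 2.7 mostly means declaring backports for stdlib modules that only landed in 2.7. For the scripts shipped here the notable gap is `argparse`. A hedged sketch of how `setup.py` could express that; the exact pin is an illustrative assumption:

```python
# Sketch: argparse is not in the Python 2.6 standard library, so declare the
# PyPI backport. Harmless on 2.7+ (it simply installs alongside the stdlib
# copy), required for the CLI scripts to run on 2.6.
install_requires = ["argparse==1.2.1"]
```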
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 import sys
5 from setuptools import setup
6
7
8 setup(
9 name='pip-tools',
10 version='0.2',
11 url='https://github.com/nvie/pip-tools/',
12 license='BSD',
13 author='Vincent Driessen',
14 author_email='[email protected]',
15 description=__doc__,
16 #packages=[],
17 scripts=['bin/pip-review', 'bin/pip-dump'],
18 #include_package_data=True,
19 zip_safe=False,
20 platforms='any',
21 #install_requires=[],
22 classifiers=[
23 # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
24 #'Development Status :: 1 - Planning',
25 #'Development Status :: 2 - Pre-Alpha',
26 #'Development Status :: 3 - Alpha',
27 'Development Status :: 4 - Beta',
28 #'Development Status :: 5 - Production/Stable',
29 #'Development Status :: 6 - Mature',
30 #'Development Status :: 7 - Inactive',
31 'Intended Audience :: Developers',
32 'Intended Audience :: System Administrators',
33 'License :: OSI Approved :: BSD License',
34 'Operating System :: OS Independent',
35 'Topic :: System :: Systems Administration',
36 ]
37 )
38
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
#include_package_data=True,
zip_safe=False,
platforms='any',
- #install_requires=[],
+ install_requires=['argparse==1.2.1'], # needed for python 2.6
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
#'Development Status :: 1 - Planning',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n- #install_requires=[],\n+ install_requires=['argparse==1.2.1'], # needed for python 2.6\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n", "issue": "Support Python versions lower than 2.7, too\n\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\nsetup(\n name='pip-tools',\n version='0.2',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n #install_requires=[],\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]} | 878 | 115 |
gh_patches_debug_8563 | rasdani/github-patches | git_diff | google__osv.dev-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"withdrawn" entries not getting exported correctly
Identified by @andrewpollock :
https://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94
withdrawn entries are marked as status = INVALID in our DB, so they're not included.
They should be included when we export.
</issue>
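The export filter in `_export_ecosystem_to_bucket` (see the listing below) currently keeps only `PROCESSED` bugs, which is what drops withdrawn (`INVALID`) entries. A sketch of a more permissive loop condition, assuming the `BugStatus` enum also carries an `UNPROCESSED` member for records that genuinely should be skipped; this is a fragment of the loop shown in the file, not a standalone script:

```python
# Sketch: skip only private and still-unprocessed bugs, so withdrawn
# (INVALID) entries are exported along with processed ones.
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
    if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:
        continue
    ...  # write the vulnerability JSON and add it to the zip, as before
```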
<code>
[start of docker/exporter/exporter.py]
1 #!/usr/bin/env python3
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """OSV Exporter."""
16 import argparse
17 import concurrent.futures
18 import logging
19 import os
20 import tempfile
21 import zipfile
22 from typing import List
23
24 from google.cloud import ndb
25 from google.cloud import storage
26 from google.cloud import logging as google_logging
27
28 import osv
29
30 DEFAULT_WORK_DIR = '/work'
31
32 DEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'
33 _EXPORT_WORKERS = 32
34 ECOSYSTEMS_FILE = 'ecosystems.txt'
35
36
37 class Exporter:
38 """Exporter."""
39
40 def __init__(self, work_dir, export_bucket):
41 self._work_dir = work_dir
42 self._export_bucket = export_bucket
43
44 def run(self):
45 """Run exporter."""
46 query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)
47 ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]
48
49 for ecosystem in ecosystems:
50 with tempfile.TemporaryDirectory() as tmp_dir:
51 self._export_ecosystem_to_bucket(ecosystem, tmp_dir)
52
53 with tempfile.TemporaryDirectory() as tmp_dir:
54 self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)
55
56 def upload_single(self, bucket, source_path, target_path):
57 """Upload a single file to a bucket."""
58 logging.info('Uploading %s', target_path)
59 try:
60 blob = bucket.blob(target_path)
61 blob.upload_from_filename(source_path)
62 except Exception as e:
63 logging.error('Failed to export: %s', e)
64
65 def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],
66 tmp_dir: str):
67 """Export an ecosystems.txt file with all of the ecosystem names.
68
69 See https://github.com/google/osv.dev/issues/619
70
71 Args:
72 ecosystems: the list of ecosystem names
73 tmp_dir: temporary directory for scratch
74 """
75
76 logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)
77 storage_client = storage.Client()
78 bucket = storage_client.get_bucket(self._export_bucket)
79 ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)
80 with open(ecosystems_file_path, "w") as ecosystems_file:
81 ecosystems_file.writelines([e + "\n" for e in ecosystems])
82
83 self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)
84
85 def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):
86 """Export ecosystem vulns to bucket."""
87 logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)
88 storage_client = storage.Client()
89 bucket = storage_client.get_bucket(self._export_bucket)
90
91 zip_path = os.path.join(tmp_dir, 'all.zip')
92 with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
93 for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
94 if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
95 continue
96
97 file_path = os.path.join(tmp_dir, bug.id() + '.json')
98 osv.write_vulnerability(
99 bug.to_vulnerability(include_source=True), file_path)
100 zip_file.write(file_path, os.path.basename(file_path))
101
102 with concurrent.futures.ThreadPoolExecutor(
103 max_workers=_EXPORT_WORKERS) as executor:
104 for filename in os.listdir(tmp_dir):
105 executor.submit(self.upload_single, bucket,
106 os.path.join(tmp_dir, filename),
107 f'{ecosystem}/{filename}')
108
109
110 def main():
111 logging.getLogger().setLevel(logging.INFO)
112 parser = argparse.ArgumentParser(description='Exporter')
113 parser.add_argument(
114 '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)
115 parser.add_argument(
116 '--bucket',
117 help='Bucket name to export to',
118 default=DEFAULT_EXPORT_BUCKET)
119 args = parser.parse_args()
120
121 tmp_dir = os.path.join(args.work_dir, 'tmp')
122 os.makedirs(tmp_dir, exist_ok=True)
123 os.environ['TMPDIR'] = tmp_dir
124
125 exporter = Exporter(args.work_dir, args.bucket)
126 exporter.run()
127
128
129 if __name__ == '__main__':
130 _ndb_client = ndb.Client()
131 logging_client = google_logging.Client()
132 logging_client.setup_logging()
133 with _ndb_client.context():
134 main()
135
[end of docker/exporter/exporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py
--- a/docker/exporter/exporter.py
+++ b/docker/exporter/exporter.py
@@ -91,7 +91,7 @@
zip_path = os.path.join(tmp_dir, 'all.zip')
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:
continue
file_path = os.path.join(tmp_dir, bug.id() + '.json')
| {"golden_diff": "diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py\n--- a/docker/exporter/exporter.py\n+++ b/docker/exporter/exporter.py\n@@ -91,7 +91,7 @@\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:\n continue\n \n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n", "issue": "\"withdrawn\" entries not getting exported correctly\nIdentified by @andrewpollock : \r\n\r\nhttps://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94\r\n\r\nwithdrawn entries are marked as status = INVALID in our DB, so they're not included.\r\n\r\nThey should be included when we export. \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"OSV Exporter.\"\"\"\nimport argparse\nimport concurrent.futures\nimport logging\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List\n\nfrom google.cloud import ndb\nfrom google.cloud import storage\nfrom google.cloud import logging as google_logging\n\nimport osv\n\nDEFAULT_WORK_DIR = '/work'\n\nDEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'\n_EXPORT_WORKERS = 32\nECOSYSTEMS_FILE = 'ecosystems.txt'\n\n\nclass Exporter:\n \"\"\"Exporter.\"\"\"\n\n def __init__(self, work_dir, export_bucket):\n self._work_dir = work_dir\n self._export_bucket = export_bucket\n\n def run(self):\n \"\"\"Run exporter.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]\n\n for ecosystem in ecosystems:\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_to_bucket(ecosystem, tmp_dir)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)\n\n def upload_single(self, bucket, source_path, target_path):\n \"\"\"Upload a single file to a bucket.\"\"\"\n logging.info('Uploading %s', target_path)\n try:\n blob = bucket.blob(target_path)\n blob.upload_from_filename(source_path)\n except Exception as e:\n logging.error('Failed to export: %s', e)\n\n def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],\n tmp_dir: str):\n \"\"\"Export an ecosystems.txt file with all of the ecosystem names.\n\n See https://github.com/google/osv.dev/issues/619\n\n Args:\n ecosystems: the list of ecosystem names\n tmp_dir: temporary directory for scratch\n \"\"\"\n\n logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)\n with open(ecosystems_file_path, \"w\") as ecosystems_file:\n ecosystems_file.writelines([e + \"\\n\" for e in 
ecosystems])\n\n self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)\n\n def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):\n \"\"\"Export ecosystem vulns to bucket.\"\"\"\n logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n continue\n\n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n osv.write_vulnerability(\n bug.to_vulnerability(include_source=True), file_path)\n zip_file.write(file_path, os.path.basename(file_path))\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=_EXPORT_WORKERS) as executor:\n for filename in os.listdir(tmp_dir):\n executor.submit(self.upload_single, bucket,\n os.path.join(tmp_dir, filename),\n f'{ecosystem}/{filename}')\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser(description='Exporter')\n parser.add_argument(\n '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)\n parser.add_argument(\n '--bucket',\n help='Bucket name to export to',\n default=DEFAULT_EXPORT_BUCKET)\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.work_dir, 'tmp')\n os.makedirs(tmp_dir, exist_ok=True)\n os.environ['TMPDIR'] = tmp_dir\n\n exporter = Exporter(args.work_dir, args.bucket)\n exporter.run()\n\n\nif __name__ == '__main__':\n _ndb_client = ndb.Client()\n logging_client = google_logging.Client()\n logging_client.setup_logging()\n with _ndb_client.context():\n main()\n", "path": "docker/exporter/exporter.py"}]} | 2,014 | 164 |
gh_patches_debug_6226 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1847 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'split_by_domain'
```
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 73, in _wrap_send
span.service = _extract_service_name(instance, span, hostname=hostname)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 30, in _extract_service_name
if cfg['split_by_domain'] and hostname:
KeyError: 'split_by_domain'
```
Happens on python 3.6 and 3.7
</issue>
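The traceback points at an empty integration config for the session: `_wrap_send` looks up `cfg['split_by_domain']` from the `Pin` attached to the session, and the pre-traced `TracedSession` class shown below never gets one. A sketch of attaching the requests integration config when the class is patched; note that `_config` is an internal keyword of `Pin`, so treat this as an assumption about the intended wiring rather than documented API:

```python
from ddtrace import Pin, config
from ddtrace.contrib.requests.session import TracedSession

# Sketch: give TracedSession a Pin that carries the requests integration
# config, so _wrap_send can find keys such as 'split_by_domain'.
Pin(_config=config.requests).onto(TracedSession)
```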
<code>
[start of ddtrace/contrib/requests/session.py]
1 import requests
2
3 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
4
5 from .connection import _wrap_send
6
7
8 class TracedSession(requests.Session):
9 """TracedSession is a requests' Session that is already traced.
10 You can use it if you want a finer grained control for your
11 HTTP clients.
12 """
13
14 pass
15
16
17 # always patch our `TracedSession` when imported
18 _w(TracedSession, "send", _wrap_send)
19
[end of ddtrace/contrib/requests/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py
--- a/ddtrace/contrib/requests/session.py
+++ b/ddtrace/contrib/requests/session.py
@@ -2,6 +2,8 @@
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+from ddtrace import config, Pin
+
from .connection import _wrap_send
@@ -16,3 +18,4 @@
# always patch our `TracedSession` when imported
_w(TracedSession, "send", _wrap_send)
+Pin(_config=config.requests).onto(TracedSession)
| {"golden_diff": "diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py\n--- a/ddtrace/contrib/requests/session.py\n+++ b/ddtrace/contrib/requests/session.py\n@@ -2,6 +2,8 @@\n \n from ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n \n+from ddtrace import config, Pin\n+\n from .connection import _wrap_send\n \n \n@@ -16,3 +18,4 @@\n \n # always patch our `TracedSession` when imported\n _w(TracedSession, \"send\", _wrap_send)\n+Pin(_config=config.requests).onto(TracedSession)\n", "issue": "KeyError: 'split_by_domain' \n```\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 581, in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 533, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 73, in _wrap_send\r\n span.service = _extract_service_name(instance, span, hostname=hostname)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 30, in _extract_service_name\r\n if cfg['split_by_domain'] and hostname:\r\nKeyError: 'split_by_domain'\r\n```\r\n\r\nHappens on python 3.6 and 3.7\n", "before_files": [{"content": "import requests\n\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .connection import _wrap_send\n\n\nclass TracedSession(requests.Session):\n \"\"\"TracedSession is a requests' Session that is already traced.\n You can use it if you want a finer grained control for your\n HTTP clients.\n \"\"\"\n\n pass\n\n\n# always patch our `TracedSession` when imported\n_w(TracedSession, \"send\", _wrap_send)\n", "path": "ddtrace/contrib/requests/session.py"}]} | 951 | 141 |
gh_patches_debug_21677 | rasdani/github-patches | git_diff | plotly__plotly.py-4562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
With newer versions of orjson, users need to specify the json engine explicitly (bug?)
Hey
I found out I get an
`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`
if I don't specify this
`plotly.io.json.config.default_engine = 'orjson'`
when using orjson v3.6.6 (latest as of 25jan2022)
Also, additional note for whoever might have this issue: you don't need to uninstall orjson if you don't want to use it. just set the engine to 'json' explicitly.
I'm using orjson because of the performance claims, although I ran some tests switching between the 2 engines and they seem to yield the same results: using go.Candlestick with 10000 candlesticks and some 4-5 indicators, getting ~0.8sec in each case for creating the plot. My purpose is to improve the dash server performance, but it seems it makes no difference (the web page still renders slower than the ticker even with 600 candles)
</issue>
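The "partially initialized module" message suggests that `get_module` (shown below) is handing back a module straight from `sys.modules` while that module is still mid-import, before attributes like `OPT_NON_STR_KEYS` exist. A defensive sketch of the lookup, reusing the module-level names from the listing below (`import_module`, `sys`, `logger`, `_not_importable`); it only short-circuits through `sys.modules` when loading is explicitly disabled, and otherwise always goes through `import_module` instead of trusting whatever half-built object happens to be cached. This illustrates the idea, not necessarily the exact fix that shipped:

```python
def get_module(name, should_load=True):
    """Return an optional module, or None if it is unavailable."""
    if not should_load:
        # Only here do we consult the cache without triggering an import.
        return sys.modules.get(name, None)

    if name not in _not_importable:
        try:
            return import_module(name)
        except ImportError:
            _not_importable.add(name)
        except Exception:
            _not_importable.add(name)
            logger.exception(f"Error importing optional module {name}")

    return None
```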
<code>
[start of packages/python/plotly/_plotly_utils/optional_imports.py]
1 """
2 Stand-alone module to provide information about whether optional deps exist.
3
4 """
5 from importlib import import_module
6 import logging
7 import sys
8
9 logger = logging.getLogger(__name__)
10 _not_importable = set()
11
12
13 def get_module(name, should_load=True):
14 """
15 Return module or None. Absolute import is required.
16
17 :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
18 :raise: (ImportError) Only when exc_msg is defined.
19 :return: (module|None) If import succeeds, the module will be returned.
20
21 """
22 if name in sys.modules:
23 return sys.modules[name]
24 if not should_load:
25 return None
26 if name not in _not_importable:
27 try:
28 return import_module(name)
29 except ImportError:
30 _not_importable.add(name)
31 except Exception:
32 _not_importable.add(name)
33 msg = f"Error importing optional module {name}"
34 logger.exception(msg)
35
[end of packages/python/plotly/_plotly_utils/optional_imports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py
--- a/packages/python/plotly/_plotly_utils/optional_imports.py
+++ b/packages/python/plotly/_plotly_utils/optional_imports.py
@@ -2,6 +2,7 @@
Stand-alone module to provide information about whether optional deps exist.
"""
+
from importlib import import_module
import logging
import sys
@@ -19,10 +20,9 @@
:return: (module|None) If import succeeds, the module will be returned.
"""
- if name in sys.modules:
- return sys.modules[name]
if not should_load:
- return None
+ return sys.modules.get(name, None)
+
if name not in _not_importable:
try:
return import_module(name)
@@ -32,3 +32,5 @@
_not_importable.add(name)
msg = f"Error importing optional module {name}"
logger.exception(msg)
+
+ return None
| {"golden_diff": "diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py\n--- a/packages/python/plotly/_plotly_utils/optional_imports.py\n+++ b/packages/python/plotly/_plotly_utils/optional_imports.py\n@@ -2,6 +2,7 @@\n Stand-alone module to provide information about whether optional deps exist.\n \n \"\"\"\n+\n from importlib import import_module\n import logging\n import sys\n@@ -19,10 +20,9 @@\n :return: (module|None) If import succeeds, the module will be returned.\n \n \"\"\"\n- if name in sys.modules:\n- return sys.modules[name]\n if not should_load:\n- return None\n+ return sys.modules.get(name, None)\n+\n if name not in _not_importable:\n try:\n return import_module(name)\n@@ -32,3 +32,5 @@\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n+\n+ return None\n", "issue": "With newer versions of orjson, users need to specify the json engine explicitly (bug?)\nHey\r\n\r\nI found out I get an\r\n`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`\r\nif I don't specify this\r\n`plotly.io.json.config.default_engine = 'orjson'`\r\nwhen using orjson v3.6.6 (latest as of 25jan2022)\r\n\r\nAlso, additional note for whoever might have this issue: you don't need to uninstall orjson if you don't want to use it. just set the engine to 'json' explicitly. \r\n\r\nI'm using orjson because of the performance claims, although I ran some tests switching between the 2 engines and they seem to yield the same results: using go.Candlestick with 10000 candlesticks and some 4-5 indicators, getting ~0.8sec in each case for creating the plot. My purpose is to improve the dash server performace, but it seems it makes no difference (the web page still renders slower than the ticker even with 600 candles)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nStand-alone module to provide information about whether optional deps exist.\n\n\"\"\"\nfrom importlib import import_module\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n_not_importable = set()\n\n\ndef get_module(name, should_load=True):\n \"\"\"\n Return module or None. Absolute import is required.\n\n :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.\n :raise: (ImportError) Only when exc_msg is defined.\n :return: (module|None) If import succeeds, the module will be returned.\n\n \"\"\"\n if name in sys.modules:\n return sys.modules[name]\n if not should_load:\n return None\n if name not in _not_importable:\n try:\n return import_module(name)\n except ImportError:\n _not_importable.add(name)\n except Exception:\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n", "path": "packages/python/plotly/_plotly_utils/optional_imports.py"}]} | 1,060 | 241 |
gh_patches_debug_49037 | rasdani/github-patches | git_diff | facebookresearch__hydra-2677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Fix failing tests
Several tests are broken on main
</issue>
<code>
[start of examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import os
3
4 from omegaconf import DictConfig
5
6 import hydra
7
8
9 @hydra.main(version_base=None)
10 def my_app(_cfg: DictConfig) -> None:
11 print(f"Working directory : {os.getcwd()}")
12 print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
13
14
15 if __name__ == "__main__":
16 my_app()
17
[end of examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
@@ -9,7 +9,9 @@
@hydra.main(version_base=None)
def my_app(_cfg: DictConfig) -> None:
print(f"Working directory : {os.getcwd()}")
- print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
+ print(
+ f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
+ )
if __name__ == "__main__":
| {"golden_diff": "diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n@@ -9,7 +9,9 @@\n @hydra.main(version_base=None)\n def my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n- print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n+ print(\n+ f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n+ )\n \n \n if __name__ == \"__main__\":\n", "issue": "[Bug] Fix failing tests\nSeveral tests are broken on main\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}]} | 709 | 198 |
gh_patches_debug_66238 | rasdani/github-patches | git_diff | deepchecks__deepchecks-728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] resources/suite_output.html file is missing when installing not via git
**Describe the bug**
can't use save_as_html because suite_output.html file is missing
**To Reproduce**
pip install deepchecks
suite_result.save_as_html()
**Expected behavior**
save as html
**Environment (please complete the following information):**
- OS: linux
- Python Version: 3.7
- Deepchecks Version: 0.3.1
</issue>
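When a file exists in a git checkout but is missing from a pip install, the usual culprit is that it was never declared as package data, so sdists and wheels are built without it. A generic sketch of the declaration; the sub-package that actually holds `suite_output.html` is an assumption here:

```python
# Sketch only: tell setuptools to ship the HTML template with the package.
# The sub-package path is hypothetical; each directory containing templates
# needs its own entry because package_data globs are per-directory.
PACKAGE_DATA = {
    "deepchecks": ["*.html"],
    # e.g. "deepchecks.base": ["resources/*.html"],  # hypothetical location
}
# then: setup(..., include_package_data=True, package_data=PACKAGE_DATA)
```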
<code>
[start of setup.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """
12
13 |build| |Documentation Status| |pkgVersion| |pyVersions|
14 |Maintainability| |Coverage Status|
15
16 .. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png
17 :target: https://github.com/deepchecks/deepchecks
18
19 Deepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.
20 This includes checks related to various types of issues, such as model performance, data integrity,
21 distribution mismatches, and more.
22
23 What Do You Need in Order to Start Validating?
24 ----------------------------------------------
25
26 Depending on your phase and what you wise to validate, you'll need a
27 subset of the following:
28
29 - Raw data (before pre-processing such as OHE, string processing,
30 etc.), with optional labels
31
32 - The model's training data with labels
33
34 - Test data (which the model isn't exposed to) with labels
35
36 - A model compatible with scikit-learn API that you wish to validate
37 (e.g. RandomForest, XGBoost)
38
39 Deepchecks validation accompanies you from the initial phase when you
40 have only raw data, through the data splits, and to the final stage of
41 having a trained model that you wish to evaluate. Accordingly, each
42 phase requires different assets for the validation. See more about
43 typical usage scenarios and the built-in suites in the
44 `docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.
45
46 Installation
47 ------------
48
49 Using pip
50 ~~~~~~~~~
51
52 .. code:: bash
53
54 pip install deepchecks #--upgrade --user
55
56 Using conda
57 ~~~~~~~~~~~
58
59 .. code:: bash
60
61 conda install -c deepchecks deepchecks
62
63 .. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg
64 .. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest
65 :target: https://docs.deepchecks.com/en/latest/?badge=latest
66 .. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks
67 .. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks
68 .. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability
69 :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability
70 .. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main
71 :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main
72
73 """
74
75 import setuptools
76 from setuptools import setup
77 from distutils.util import convert_path
78 import os
79
80 main_ns = {}
81 DOCLINES = (__doc__ or '').split("\n")
82
83 with open(os.path.join('./', 'VERSION')) as version_file:
84 VER = version_file.read().strip()
85
86 requirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'
87 install_requires = []
88 if os.path.isfile(requirementPath):
89 with open(requirementPath) as f:
90 install_requires = f.read().splitlines()
91
92
93
94
95 setup(
96 name='deepchecks',
97 version=VER,
98 packages=setuptools.find_packages(),
99 install_requires=install_requires,
100 license_files = ('LICENSE', ),
101 description = DOCLINES[0],
102 long_description="\n".join(DOCLINES[2:]),
103 author = 'deepchecks',
104 author_email = '[email protected]',
105 url = 'https://github.com/deepchecks/deepchecks',
106 download_url = "https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz".format(VER),
107 keywords = ['Software Development', 'Machine Learning'],
108 include_package_data=True,
109 classifiers = [
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Topic :: Software Development',
113 'Topic :: Scientific/Engineering',
114 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
115 'Programming Language :: Python :: 3',
116 'Programming Language :: Python :: 3.6',
117 'Programming Language :: Python :: 3.7',
118 'Programming Language :: Python :: 3.8',
119 'Programming Language :: Python :: 3.9',
120 'Programming Language :: Python :: 3.10',
121 ],
122 )
123
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,6 @@
import setuptools
from setuptools import setup
-from distutils.util import convert_path
import os
main_ns = {}
@@ -89,9 +88,6 @@
with open(requirementPath) as f:
install_requires = f.read().splitlines()
-
-
-
setup(
name='deepchecks',
version=VER,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,7 +74,6 @@\n \n import setuptools\n from setuptools import setup\n-from distutils.util import convert_path\n import os\n \n main_ns = {}\n@@ -89,9 +88,6 @@\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n \n-\n-\n-\n setup(\n name='deepchecks',\n version=VER,\n", "issue": "[BUG] resources/suite_output.html file is missing when installing not via git\n**Describe the bug**\r\ncan't use save_as_html because suite_output.html file is missing\r\n\r\n**To Reproduce**\r\npip install deepchecks\r\nsuite_result.save_as_html()\r\n\r\n**Expected behavior**\r\nsave as html\r\n\r\n**Environment (please complete the following information):**\r\n - OS: linux\r\n - Python Version: 3.7\r\n - Deepchecks Version: 0.3.1\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. 
|Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nfrom distutils.util import convert_path\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\n\n\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}]} | 1,982 | 106 |
gh_patches_debug_4106 | rasdani/github-patches | git_diff | hylang__hy-1955 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make hy-history location configurable
How about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.
</issue>
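The request boils down to resolving the history path through the environment before falling back to the hard-coded default used today in `completion()`. A minimal sketch, assuming `HY_HISTORY` ends up as the variable name:

```python
import os

# Sketch: let HY_HISTORY override where the REPL history file is stored,
# keeping ~/.hy-history as the default when the variable is unset.
history = os.environ.get("HY_HISTORY", os.path.expanduser("~/.hy-history"))
```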
<code>
[start of hy/completer.py]
1 # Copyright 2021 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9 import builtins
10
11 import hy.macros
12 import hy.compiler
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if docomplete:
28 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
29 readline_bind = "bind ^I rl_complete"
30 else:
31 readline_bind = "tab: complete"
32
33
34 class Completer(object):
35
36 def __init__(self, namespace={}):
37 if not isinstance(namespace, dict):
38 raise TypeError('namespace must be a dictionary')
39 self.namespace = namespace
40 self.path = [hy.compiler._special_form_compilers,
41 builtins.__dict__,
42 namespace]
43
44 self.tag_path = []
45
46 namespace.setdefault('__macros__', {})
47 namespace.setdefault('__tags__', {})
48
49 self.path.append(namespace['__macros__'])
50 self.tag_path.append(namespace['__tags__'])
51
52 def attr_matches(self, text):
53 # Borrowed from IPython's completer
54 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
55
56 if m:
57 expr, attr = m.group(1, 3)
58 attr = attr.replace("-", "_")
59 expr = expr.replace("-", "_")
60 else:
61 return []
62
63 try:
64 obj = eval(expr, self.namespace)
65 words = dir(obj)
66 except Exception:
67 return []
68
69 n = len(attr)
70 matches = []
71 for w in words:
72 if w[:n] == attr:
73 matches.append("{}.{}".format(
74 expr.replace("_", "-"), w.replace("_", "-")))
75 return matches
76
77 def global_matches(self, text):
78 matches = []
79 for p in self.path:
80 for k in p.keys():
81 if isinstance(k, str):
82 k = k.replace("_", "-")
83 if k.startswith(text):
84 matches.append(k)
85 return matches
86
87 def tag_matches(self, text):
88 text = text[1:]
89 matches = []
90 for p in self.tag_path:
91 for k in p.keys():
92 if isinstance(k, str):
93 if k.startswith(text):
94 matches.append("#{}".format(k))
95 return matches
96
97 def complete(self, text, state):
98 if text.startswith("#"):
99 matches = self.tag_matches(text)
100 elif "." in text:
101 matches = self.attr_matches(text)
102 else:
103 matches = self.global_matches(text)
104 try:
105 return matches[state]
106 except IndexError:
107 return None
108
109
110 @contextlib.contextmanager
111 def completion(completer=None):
112 delims = "()[]{} "
113 if not completer:
114 completer = Completer()
115
116 if docomplete:
117 readline.set_completer(completer.complete)
118 readline.set_completer_delims(delims)
119
120 history = os.path.expanduser("~/.hy-history")
121 readline.parse_and_bind("set blink-matching-paren on")
122
123 try:
124 readline.read_history_file(history)
125 except IOError:
126 pass
127
128 readline.parse_and_bind(readline_bind)
129
130 try:
131 yield
132 finally:
133 if docomplete:
134 try:
135 readline.write_history_file(history)
136 except IOError:
137 pass
138
[end of hy/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -117,7 +117,8 @@
readline.set_completer(completer.complete)
readline.set_completer_delims(delims)
- history = os.path.expanduser("~/.hy-history")
+ history = os.environ.get(
+ "HY_HISTORY", os.path.expanduser("~/.hy-history"))
readline.parse_and_bind("set blink-matching-paren on")
try:
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -117,7 +117,8 @@\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n \n- history = os.path.expanduser(\"~/.hy-history\")\n+ history = os.environ.get(\n+ \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n \n try:\n", "issue": "Make hy-history location configurable\nHow about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.\n", "before_files": [{"content": "# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._special_form_compilers,\n builtins.__dict__,\n namespace]\n\n self.tag_path = []\n\n namespace.setdefault('__macros__', {})\n namespace.setdefault('__tags__', {})\n\n self.path.append(namespace['__macros__'])\n self.tag_path.append(namespace['__tags__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, str):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except IOError:\n pass\n", "path": 
"hy/completer.py"}]} | 1,645 | 128 |
gh_patches_debug_58411 | rasdani/github-patches | git_diff | web2py__web2py-1871 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
contrib/pg8000 is old and causes weird postgres errors
Please update the contrib/pg8000 driver to the current version.
Otherwise errors like Broken Pipe and OperationalError occur,
- at least for Postgres 9.6,
- especially for long-running tasks (i.e. the scheduler), where they are not properly handled (scheduler workers will restart and the earlier run remains as RUNNING).
related links:
https://github.com/mfenniak/pg8000/issues/73
https://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU
(Copied into issues for both web2py/web2py and web2py/pydal.)
</issue>
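
As a rough way to confirm how far the vendored copy has drifted, the bundled module can be compared against the standalone PyPI package. This is only an illustrative sketch and assumes both copies are importable in the same environment:

```python
# Illustrative check only: compare the bundled gluon.contrib.pg8000 with a
# separately installed pg8000 from PyPI.
from importlib import metadata

from gluon.contrib import pg8000 as bundled

print("bundled copy:", getattr(bundled, "__version__", "no __version__ attribute"))
try:
    print("PyPI package:", metadata.version("pg8000"))
except metadata.PackageNotFoundError:
    print("standalone pg8000 is not installed")
```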
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 from gluon.fileutils import tar, untar, read_file, write_file
5 import tarfile
6 import sys
7
8
9 def tar(file, filelist, expression='^.+$'):
10 """
11 tars dir/files into file, only tars file that match expression
12 """
13
14 tar = tarfile.TarFile(file, 'w')
15 try:
16 for element in filelist:
17 try:
18 for file in listdir(element, expression, add_dirs=True):
19 tar.add(os.path.join(element, file), file, False)
20 except:
21 tar.add(element)
22 finally:
23 tar.close()
24
25
26 def start():
27 if 'sdist' in sys.argv:
28 tar('gluon/env.tar', ['applications', 'VERSION',
29 'extras/icons/splashlogo.gif'])
30
31 setup(name='web2py',
32 version=read_file("VERSION").split()[1],
33 description="""full-stack framework for rapid development and prototyping
34 of secure database-driven web-based applications, written and
35 programmable in Python.""",
36 long_description="""
37 Everything in one package with no dependencies. Development, deployment,
38 debugging, testing, database administration and maintenance of applications can
39 be done via the provided web interface. web2py has no configuration files,
40 requires no installation, can run off a USB drive. web2py uses Python for the
41 Model, the Views and the Controllers, has a built-in ticketing system to manage
42 errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
43 MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a
44 Database Abstraction Layer. web2py includes libraries to handle
45 HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
46 ready, capable of upload/download streaming of very large files, and always
47 backward compatible.
48 """,
49 author='Massimo Di Pierro',
50 author_email='[email protected]',
51 license='http://web2py.com/examples/default/license',
52 classifiers=["Development Status :: 5 - Production/Stable"],
53 url='http://web2py.com',
54 platforms='Windows, Linux, Mac, Unix,Windows Mobile',
55 packages=['gluon',
56 'gluon/contrib',
57 'gluon/contrib/gateways',
58 'gluon/contrib/login_methods',
59 'gluon/contrib/markdown',
60 'gluon/contrib/markmin',
61 'gluon/contrib/memcache',
62 'gluon/contrib/fpdf',
63 'gluon/contrib/pymysql',
64 'gluon/contrib/pyrtf',
65 'gluon/contrib/pysimplesoap',
66 'gluon/contrib/pg8000',
67 'gluon/contrib/plural_rules',
68 'gluon/contrib/minify',
69 'gluon/contrib/pyaes',
70 'gluon/contrib/pyuca',
71 'gluon/tests',
72 ],
73 package_data={'gluon': ['env.tar']},
74 # scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],
75 )
76
77 if __name__ == '__main__':
78 #print "web2py does not require installation and"
79 #print "you should just start it with:"
80 #print
81 #print "$ python web2py.py"
82 #print
83 #print "are you sure you want to install it anyway (y/n)?"
84 #s = raw_input('>')
85 #if s.lower()[:1]=='y':
86 start()
87
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,6 @@
'gluon/contrib/pymysql',
'gluon/contrib/pyrtf',
'gluon/contrib/pysimplesoap',
- 'gluon/contrib/pg8000',
'gluon/contrib/plural_rules',
'gluon/contrib/minify',
'gluon/contrib/pyaes',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,6 @@\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n- 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n", "issue": "contrib/pg8000 is old and causes weird postgres errors\nPlease update the contrib/pg8000 driver to the current version.\r\nOtherwise errors like Broken Pipe, Operationalerror,.. occur,\r\n- at least for postgres 9.6,\r\n- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).\r\n\r\nrelated links:\r\nhttps://github.com/mfenniak/pg8000/issues/73\r\nhttps://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU\r\n\r\n..in copy into issues: web2py/web2py, web2py/pydal\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. 
Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}]} | 1,644 | 114 |
gh_patches_debug_18335 | rasdani/github-patches | git_diff | searx__searx-1301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Gigablast crash
Greetings,
I have been experimenting with SearX lately and have been seeing this message:
"
Engines cannot retrieve results:
gigablast (unexpected crash: No JSON object could be decoded)
"
Seems like something is wrong with the Gigablast driver but I am not sure how to fix it.
I'm using: searx - 0.14.0
Thanks
</issue>
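
To see what Gigablast actually returns when the engine reports this crash, a standalone request against the same endpoint helps. The parameters below mirror the engine's search string; this is only a debugging aid and the remote API may have changed since searx 0.14.0:

```python
# Debugging aid only: request the same URL shape the engine builds and show
# the raw body when it is not valid JSON.
import json
import random
from time import time

import requests

url = (
    "https://gigablast.com/search?q=test&n=10&c=main&s=0&format=json&qh=0"
    "&qlang=en&ff=0"
    "&rxiec={}&rand={}".format(random.randint(1000000000, 9999999999),
                               int(time() * 1000))
)
resp = requests.get(url, timeout=10)
try:
    data = json.loads(resp.text)
    print("parsed results:", len(data.get("results", [])))
except ValueError:
    # "No JSON object could be decoded" corresponds to this branch: the
    # endpoint answered with something other than JSON (e.g. an HTML page).
    print("non-JSON response:", resp.status_code, resp.text[:200])
```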
<code>
[start of searx/engines/gigablast.py]
1 """
2 Gigablast (Web)
3
4 @website https://gigablast.com
5 @provide-api yes (https://gigablast.com/api.html)
6
7 @using-api yes
8 @results XML
9 @stable yes
10 @parse url, title, content
11 """
12
13 import random
14 from json import loads
15 from time import time
16 from lxml.html import fromstring
17 from searx.url_utils import urlencode
18
19 # engine dependent config
20 categories = ['general']
21 paging = True
22 number_of_results = 10
23 language_support = True
24 safesearch = True
25
26 # search-url
27 base_url = 'https://gigablast.com/'
28 search_string = 'search?{query}'\
29 '&n={number_of_results}'\
30 '&c=main'\
31 '&s={offset}'\
32 '&format=json'\
33 '&qh=0'\
34 '&qlang={lang}'\
35 '&ff={safesearch}'\
36 '&rxiec={rxieu}'\
37 '&rand={rxikd}' # current unix timestamp
38
39 # specific xpath variables
40 results_xpath = '//response//result'
41 url_xpath = './/url'
42 title_xpath = './/title'
43 content_xpath = './/sum'
44
45 supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
46
47
48 # do search-request
49 def request(query, params):
50 offset = (params['pageno'] - 1) * number_of_results
51
52 language = params['language'].replace('-', '_').lower()
53 if language.split('-')[0] != 'zh':
54 language = language.split('-')[0]
55
56 if params['safesearch'] >= 1:
57 safesearch = 1
58 else:
59 safesearch = 0
60
61 # rxieu is some kind of hash from the search query, but accepts random atm
62 search_path = search_string.format(query=urlencode({'q': query}),
63 offset=offset,
64 number_of_results=number_of_results,
65 rxikd=int(time() * 1000),
66 rxieu=random.randint(1000000000, 9999999999),
67 lang=language,
68 safesearch=safesearch)
69
70 params['url'] = base_url + search_path
71
72 return params
73
74
75 # get response from search-request
76 def response(resp):
77 results = []
78
79 # parse results
80 response_json = loads(resp.text)
81
82 for result in response_json['results']:
83 # append result
84 results.append({'url': result['url'],
85 'title': result['title'],
86 'content': result['sum']})
87
88 # return results
89 return results
90
91
92 # get supported languages from their site
93 def _fetch_supported_languages(resp):
94 supported_languages = []
95 dom = fromstring(resp.text)
96 links = dom.xpath('//span[@id="menu2"]/a')
97 for link in links:
98 href = link.xpath('./@href')[0].split('lang%3A')
99 if len(href) == 2:
100 code = href[1].split('_')
101 if len(code) == 2:
102 code = code[0] + '-' + code[1].upper()
103 else:
104 code = code[0]
105 supported_languages.append(code)
106
107 return supported_languages
108
[end of searx/engines/gigablast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -34,6 +34,7 @@
'&qlang={lang}'\
'&ff={safesearch}'\
'&rxiec={rxieu}'\
+ '&ulse={ulse}'\
'&rand={rxikd}' # current unix timestamp
# specific xpath variables
@@ -64,6 +65,7 @@
number_of_results=number_of_results,
rxikd=int(time() * 1000),
rxieu=random.randint(1000000000, 9999999999),
+ ulse=random.randint(100000000, 999999999),
lang=language,
safesearch=safesearch)
| {"golden_diff": "diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py\n--- a/searx/engines/gigablast.py\n+++ b/searx/engines/gigablast.py\n@@ -34,6 +34,7 @@\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n+ '&ulse={ulse}'\\\n '&rand={rxikd}' # current unix timestamp\n \n # specific xpath variables\n@@ -64,6 +65,7 @@\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n+ ulse=random.randint(100000000, 999999999),\n lang=language,\n safesearch=safesearch)\n", "issue": "Gigablast crash\nGreetings,\r\n\r\nI have been experimenting with SearX lately and have been seeing this message:\r\n\r\n\"\r\nEngines cannot retrieve results:\r\n\r\ngigablast (unexpected crash: No JSON object could be decoded)\r\n\"\r\n\r\nSeems like something is wrong with the Gigablast driver but I am not sure how to fix it.\r\n\r\nI'm using: searx - 0.14.0 \r\n\r\nThanks\n", "before_files": [{"content": "\"\"\"\n Gigablast (Web)\n\n @website https://gigablast.com\n @provide-api yes (https://gigablast.com/api.html)\n\n @using-api yes\n @results XML\n @stable yes\n @parse url, title, content\n\"\"\"\n\nimport random\nfrom json import loads\nfrom time import time\nfrom lxml.html import fromstring\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['general']\npaging = True\nnumber_of_results = 10\nlanguage_support = True\nsafesearch = True\n\n# search-url\nbase_url = 'https://gigablast.com/'\nsearch_string = 'search?{query}'\\\n '&n={number_of_results}'\\\n '&c=main'\\\n '&s={offset}'\\\n '&format=json'\\\n '&qh=0'\\\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n '&rand={rxikd}' # current unix timestamp\n\n# specific xpath variables\nresults_xpath = '//response//result'\nurl_xpath = './/url'\ntitle_xpath = './/title'\ncontent_xpath = './/sum'\n\nsupported_languages_url = 'https://gigablast.com/search?&rxikd=1'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n language = params['language'].replace('-', '_').lower()\n if language.split('-')[0] != 'zh':\n language = language.split('-')[0]\n\n if params['safesearch'] >= 1:\n safesearch = 1\n else:\n safesearch = 0\n\n # rxieu is some kind of hash from the search query, but accepts random atm\n search_path = search_string.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n lang=language,\n safesearch=safesearch)\n\n params['url'] = base_url + search_path\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # parse results\n response_json = loads(resp.text)\n\n for result in response_json['results']:\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': result['sum']})\n\n # return results\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = []\n dom = fromstring(resp.text)\n links = dom.xpath('//span[@id=\"menu2\"]/a')\n for link in links:\n href = link.xpath('./@href')[0].split('lang%3A')\n if len(href) == 2:\n code = href[1].split('_')\n if len(code) == 2:\n code = code[0] + '-' + code[1].upper()\n else:\n code = code[0]\n supported_languages.append(code)\n\n return supported_languages\n", "path": "searx/engines/gigablast.py"}]} | 1,586 | 229 |
gh_patches_debug_17435 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2756 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failure to invalidate session when user resets their own password
## Description
When a user resets their own password, their session is not invalidated.
## Steps to Reproduce
1. User logs in
2. User resets password
## Expected Behavior
User is logged out and must log in again with their new password
## Actual Behavior
User can continue to browse without having to enter their new password again
## Comments
Related tickets: #2300, #880
</issue>
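
In Flask terms, the expected behaviour amounts to dropping the session keys that mark the journalist as logged in once the new password has been set, then redirecting to the login view. A minimal sketch, assuming the session stores `uid` and `expires`:

```python
from flask import redirect, session, url_for

def finish_password_change():
    # Invalidate the current session so the new password must be used.
    session.pop('uid', None)
    session.pop('expires', None)
    return redirect(url_for('main.login'))
```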
<code>
[start of securedrop/journalist_app/account.py]
1 # -*- coding: utf-8 -*-
2
3 from flask import (Blueprint, render_template, request, g, redirect, url_for,
4 flash)
5 from flask_babel import gettext
6
7 from db import db_session
8 from journalist_app.utils import (make_password, set_diceware_password,
9 validate_user)
10
11
12 def make_blueprint(config):
13 view = Blueprint('account', __name__)
14
15 @view.route('/account', methods=('GET',))
16 def edit():
17 password = make_password(config)
18 return render_template('edit_account.html',
19 password=password)
20
21 @view.route('/new-password', methods=('POST',))
22 def new_password():
23 user = g.user
24 current_password = request.form.get('current_password')
25 token = request.form.get('token')
26 error_message = gettext('Incorrect password or two-factor code.')
27 # If the user is validated, change their password
28 if validate_user(user.username, current_password, token,
29 error_message):
30 password = request.form.get('password')
31 set_diceware_password(user, password)
32 return redirect(url_for('account.edit'))
33
34 @view.route('/2fa', methods=('GET', 'POST'))
35 def new_two_factor():
36 if request.method == 'POST':
37 token = request.form['token']
38 if g.user.verify_token(token):
39 flash(gettext("Token in two-factor authentication verified."),
40 "notification")
41 return redirect(url_for('account.edit'))
42 else:
43 flash(gettext(
44 "Could not verify token in two-factor authentication."),
45 "error")
46
47 return render_template('account_new_two_factor.html', user=g.user)
48
49 @view.route('/reset-2fa-totp', methods=['POST'])
50 def reset_two_factor_totp():
51 g.user.is_totp = True
52 g.user.regenerate_totp_shared_secret()
53 db_session.commit()
54 return redirect(url_for('account.new_two_factor'))
55
56 @view.route('/reset-2fa-hotp', methods=['POST'])
57 def reset_two_factor_hotp():
58 otp_secret = request.form.get('otp_secret', None)
59 if otp_secret:
60 g.user.set_hotp_secret(otp_secret)
61 db_session.commit()
62 return redirect(url_for('account.new_two_factor'))
63 else:
64 return render_template('account_edit_hotp_secret.html')
65
66 return view
67
[end of securedrop/journalist_app/account.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py
--- a/securedrop/journalist_app/account.py
+++ b/securedrop/journalist_app/account.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from flask import (Blueprint, render_template, request, g, redirect, url_for,
- flash)
+ flash, session)
from flask_babel import gettext
from db import db_session
@@ -29,6 +29,9 @@
error_message):
password = request.form.get('password')
set_diceware_password(user, password)
+ session.pop('uid', None)
+ session.pop('expires', None)
+ return redirect(url_for('main.login'))
return redirect(url_for('account.edit'))
@view.route('/2fa', methods=('GET', 'POST'))
| {"golden_diff": "diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py\n--- a/securedrop/journalist_app/account.py\n+++ b/securedrop/journalist_app/account.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from flask import (Blueprint, render_template, request, g, redirect, url_for,\n- flash)\n+ flash, session)\n from flask_babel import gettext\n \n from db import db_session\n@@ -29,6 +29,9 @@\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n+ session.pop('uid', None)\n+ session.pop('expires', None)\n+ return redirect(url_for('main.login'))\n return redirect(url_for('account.edit'))\n \n @view.route('/2fa', methods=('GET', 'POST'))\n", "issue": "Failure to invalidate session when user resets their own password\n## Description\r\n\r\nWhen a user resets their own password, their session is not invalidated. \r\n\r\n## Steps to Reproduce\r\n\r\n1. User logs in\r\n2. User resets password\r\n\r\n## Expected Behavior\r\n\r\nUser is logged out and is requested to use their new password to login\r\n\r\n## Actual Behavior\r\n\r\nUser can continue to browse without having to enter their new password again\r\n\r\n## Comments\r\n\r\nRelated tickets: #2300, #880\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, render_template, request, g, redirect, url_for,\n flash)\nfrom flask_babel import gettext\n\nfrom db import db_session\nfrom journalist_app.utils import (make_password, set_diceware_password,\n validate_user)\n\n\ndef make_blueprint(config):\n view = Blueprint('account', __name__)\n\n @view.route('/account', methods=('GET',))\n def edit():\n password = make_password(config)\n return render_template('edit_account.html',\n password=password)\n\n @view.route('/new-password', methods=('POST',))\n def new_password():\n user = g.user\n current_password = request.form.get('current_password')\n token = request.form.get('token')\n error_message = gettext('Incorrect password or two-factor code.')\n # If the user is validated, change their password\n if validate_user(user.username, current_password, token,\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('account.edit'))\n\n @view.route('/2fa', methods=('GET', 'POST'))\n def new_two_factor():\n if request.method == 'POST':\n token = request.form['token']\n if g.user.verify_token(token):\n flash(gettext(\"Token in two-factor authentication verified.\"),\n \"notification\")\n return redirect(url_for('account.edit'))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template('account_new_two_factor.html', user=g.user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n def reset_two_factor_totp():\n g.user.is_totp = True\n g.user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n def reset_two_factor_hotp():\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n g.user.set_hotp_secret(otp_secret)\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n else:\n return render_template('account_edit_hotp_secret.html')\n\n return view\n", "path": "securedrop/journalist_app/account.py"}]} | 1,257 | 196 |
gh_patches_debug_7947 | rasdani/github-patches | git_diff | coreruleset__coreruleset-3232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Review links to OWASP wiki
### Describe the bug
We have references to other OWASP projects in our files:
```
rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf
28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh
366:# https://www.owasp.org/index.php/PHP_Object_Injection
rules/REQUEST-921-PROTOCOL-ATTACK.conf
194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)
rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf
97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000
CHANGES.md
977: https://www.owasp.org/index.php/AppSensor_DetectionPoints
rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf
690:# https://www.owasp.org/index.php/Unrestricted_File_Upload
rules/scanners-user-agents.data
58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project
```
We need to double-check that they are still valid and update them if not.
</issue>
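
A quick way to enumerate and probe these references across the repository, shown only as a sketch (the file set and redirect handling may need adjusting):

```python
# Sketch: list owasp.org wiki links in rules/ and CHANGES.md and report the
# HTTP status each one currently returns (urlopen follows redirects).
import pathlib
import re
import urllib.request

pattern = re.compile(r"https?://www\.owasp\.org/index\.php/[^\s\")]+")
candidates = list(pathlib.Path("rules").rglob("*")) + [pathlib.Path("CHANGES.md")]

for path in candidates:
    if not path.is_file():
        continue
    for lineno, line in enumerate(path.read_text(errors="replace").splitlines(), 1):
        for url in pattern.findall(line):
            try:
                status = urllib.request.urlopen(url, timeout=10).status
            except Exception as exc:
                status = exc
            print(f"{path}:{lineno} {url} -> {status}")
```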
<code>
[start of util/regexp-tricks/negative-lookahead.py]
1 import argparse
2
3 # WARNING: This script is EXPERIMENTAL. Use with caution.
4 #
5 # Known issues:
6 # * At the moment, it will probably not work with more than two strings.
7 #
8 # Known limitations:
9 # * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,
10 # make sure that subtrings of the negative lookahead are not harmful in any way.
11
12 parser = argparse.ArgumentParser(description="This script takes a list of strings and converts them into \
13 a regex that acts like a negative lookahead")
14 parser.add_argument("strings", type=str, nargs='+',
15 help="the strings to convert into a negative lookahead")
16 parser.add_argument("--prefix", type=str, default="",
17 help="sets a prefix for the resulting regex")
18 parser.add_argument("--suffix", type=str, default="",
19 help="sets a suffix for the resulting regex")
20
21 args = parser.parse_args()
22
23 # Return the longest prefix of all list elements. Shamelessly copied from:
24 # https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
25 def commonprefix(m):
26 "Given a list of pathnames, returns the longest common leading component"
27 if not m: return ''
28 s1 = min(m)
29 s2 = max(m)
30 for i, c in enumerate(s1):
31 if c != s2[i]:
32 return s1[:i]
33 return s1
34
35 # flatten returns a string with concatenated dictionary keys
36 def flatten(dict):
37 s = ""
38
39 for key in dict.keys():
40 s += key
41
42 return s
43
44 # set returns a character set containing the unique characters across all strings for the given index
45 def set(strings, index, flags):
46 dict = {}
47
48 for s in strings:
49 # Continue so we don't panic
50 if index > len(s) -1:
51 continue
52
53 dict[s[index]] = ''
54
55 return "[" + flags + flatten(dict) + "]"
56
57 # prepare converts a string for negative lookaheads emulation
58 def prepare(s, offset):
59 r = ""
60
61 if len(s) == 0:
62 return r
63
64 for i in range(offset, len(s)):
65 for j in range(0, i + 1):
66 if j == i:
67 r += "[^" + s[j] + "]"
68 else:
69 r += s[j]
70
71 if i != len(s) - 1:
72 r += "|"
73
74 return r
75
76 # run builds the negative-lookahead regex from the parsed arguments and prints it
77 def run():
78 strings = args.strings
79
80 r = ""
81 r += set(strings, 0, "^")
82
83 c = ""
84 d = {}
85
86 # Only find common string if we have more than one
87 if len(strings) > 1:
88 c = commonprefix(strings)
89
90 # Collect all characters after the common substring from every string
91 for s in strings:
92 if len(s) > len(c) and s.startswith(c):
93 d[s[len(c)]] = ''
94
95 # Add the common string to the regex to prevent accidental matching
96 if len(c) > 0:
97 if len(c) > 1:
98 r += "|" + "(?:" + prepare(c, 1) + ")"
99
100 r += "|" + "(?:" + c + "[^" + flatten(d) + "]" + ")"
101
102 for s in strings:
103 g = ""
104
105 # When the common string is > 0, offset with len(c) + 1 because we handled this earlier
106 if len(c) > 0:
107 g = prepare(s, len(c) + 1)
108 else:
109 g = prepare(s, 1)
110
111 # Add OR boolean if necessary
112 if len(g) > 0:
113 r += "|"
114
115 r += g
116
117 print(args.prefix + "(?:" + r + ")" + args.suffix)
118
119 # Only run if script is called directly
120 if __name__ == "__main__":
121 run()
122
[end of util/regexp-tricks/negative-lookahead.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py
--- a/util/regexp-tricks/negative-lookahead.py
+++ b/util/regexp-tricks/negative-lookahead.py
@@ -21,7 +21,7 @@
args = parser.parse_args()
# Return the longest prefix of all list elements. Shamelessly copied from:
-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
| {"golden_diff": "diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py\n--- a/util/regexp-tricks/negative-lookahead.py\n+++ b/util/regexp-tricks/negative-lookahead.py\n@@ -21,7 +21,7 @@\n args = parser.parse_args()\n \n # Return the longest prefix of all list elements. Shamelessly copied from:\n-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\n+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings\n def commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n", "issue": "Review links to OWASP wiki\n### Describe the bug\r\n\r\nWe have references to other OWASP projects in our files:\r\n\r\n```\r\nrules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\r\n28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh\r\n366:# https://www.owasp.org/index.php/PHP_Object_Injection\r\n\r\nrules/REQUEST-921-PROTOCOL-ATTACK.conf\r\n194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)\r\n\r\nrules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf\r\n97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000\r\n\r\nCHANGES.md\r\n977: https://www.owasp.org/index.php/AppSensor_DetectionPoints\r\n\r\nrules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\r\n690:# https://www.owasp.org/index.php/Unrestricted_File_Upload\r\n\r\nrules/scanners-user-agents.data\r\n58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project\r\n```\r\nWe need to double check they are still valid and update if not.\n", "before_files": [{"content": "import argparse\n\n# WARNING: This script is EXPERIMENTAL. Use with caution.\n#\n# Known issues:\n# * At the moment, it will probably not work with more than two strings.\n#\n# Known limitations:\n# * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,\n# make sure that subtrings of the negative lookahead are not harmful in any way.\n\nparser = argparse.ArgumentParser(description=\"This script takes a list of strings and converts them into \\\n a regex that acts like a negative lookahead\")\nparser.add_argument(\"strings\", type=str, nargs='+',\n help=\"the strings to convert into a negative lookahead\")\nparser.add_argument(\"--prefix\", type=str, default=\"\",\n help=\"sets a prefix for the resulting regex\")\nparser.add_argument(\"--suffix\", type=str, default=\"\",\n help=\"sets a suffix for the resulting regex\")\n\nargs = parser.parse_args()\n\n# Return the longest prefix of all list elements. 
Shamelessly copied from:\n# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\ndef commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1\n\n# flatten returns a string with concatenated dictionary keys\ndef flatten(dict):\n s = \"\"\n\n for key in dict.keys():\n s += key\n\n return s\n\n# set returns a character set containing the unique characters across all strings for the given index\ndef set(strings, index, flags):\n dict = {}\n\n for s in strings:\n # Continue so we don't panic\n if index > len(s) -1:\n continue\n \n dict[s[index]] = ''\n \n return \"[\" + flags + flatten(dict) + \"]\"\n\n# prepare converts a string for negative lookaheads emulation\ndef prepare(s, offset):\n r = \"\"\n\n if len(s) == 0:\n return r\n\n for i in range(offset, len(s)):\n for j in range(0, i + 1):\n if j == i:\n r += \"[^\" + s[j] + \"]\"\n else:\n r += s[j]\n\n if i != len(s) - 1:\n r += \"|\"\n\n return r\n\n# run runs the \ndef run():\n strings = args.strings\n\n r = \"\"\n r += set(strings, 0, \"^\")\n\n c = \"\"\n d = {}\n\n # Only find common string if we have more than one\n if len(strings) > 1:\n c = commonprefix(strings)\n \n # Collect all characters after the common substring from every string\n for s in strings:\n if len(s) > len(c) and s.startswith(c):\n d[s[len(c)]] = ''\n\n # Add the common string to the regex to prevent accidental matching\n if len(c) > 0:\n if len(c) > 1:\n r += \"|\" + \"(?:\" + prepare(c, 1) + \")\"\n\n r += \"|\" + \"(?:\" + c + \"[^\" + flatten(d) + \"]\" + \")\"\n\n for s in strings:\n g = \"\"\n\n # When the common string is > 0, offset with len(c) + 1 because we handled this earlier\n if len(c) > 0:\n g = prepare(s, len(c) + 1)\n else:\n g = prepare(s, 1)\n \n # Add OR boolean if necessary\n if len(g) > 0:\n r += \"|\"\n\n r += g\n\n print(args.prefix + \"(?:\" + r + \")\" + args.suffix)\n\n# Only run if script is called directly\nif __name__ == \"__main__\":\n run()\n", "path": "util/regexp-tricks/negative-lookahead.py"}]} | 1,963 | 171 |
gh_patches_debug_26830 | rasdani/github-patches | git_diff | nilearn__nilearn-1219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres
`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf` from `sklearn.covariance`.
I also prefer using partial correlations rather than the precision matrix, because then there is no need to negate the connections.
</issue>
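
A sketch of that replacement for the example script, reusing the `time_series`, `dmn_coords`, and `plotting` names defined below; the exact keyword spelling should be checked against the installed nilearn version:

```python
from nilearn.connectome import ConnectivityMeasure

connectivity_measure = ConnectivityMeasure(kind='partial correlation')
partial_correlation_matrix = connectivity_measure.fit_transform([time_series])[0]

# Unlike a precision matrix, partial correlations need no sign flip before plotting.
plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
                         title="Default Mode Network Connectivity")
```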
<code>
[start of examples/03_connectivity/plot_adhd_spheres.py]
1 """
2 Extracting brain signal from spheres
3 ====================================
4
5 This example extract brain signals from spheres described by the coordinates
6 of their center in MNI space and a given radius in millimeters. In particular,
7 this example extracts signals from Default Mode Network regions and compute a
8 connectome from them.
9
10 """
11
12 ##########################################################################
13 # Retrieve the dataset
14 from nilearn import datasets
15 adhd_dataset = datasets.fetch_adhd(n_subjects=1)
16
17 # print basic information on the dataset
18 print('First subject functional nifti image (4D) is at: %s' %
19 adhd_dataset.func[0]) # 4D data
20
21
22 ##########################################################################
23 # Coordinates of Default Mode Network
24 dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
25 labels = [
26 'Posterior Cingulate Cortex',
27 'Left Temporoparietal junction',
28 'Right Temporoparietal junction',
29 'Medial prefrontal cortex'
30 ]
31
32
33 ##########################################################################
34 # Extracts signal from sphere around DMN seeds
35 from nilearn import input_data
36
37 masker = input_data.NiftiSpheresMasker(
38 dmn_coords, radius=8,
39 detrend=True, standardize=True,
40 low_pass=0.1, high_pass=0.01, t_r=2.5,
41 memory='nilearn_cache', memory_level=1, verbose=2)
42
43 func_filename = adhd_dataset.func[0]
44 confound_filename = adhd_dataset.confounds[0]
45
46 time_series = masker.fit_transform(func_filename,
47 confounds=[confound_filename])
48
49 ##########################################################################
50 # Display time series
51 import matplotlib.pyplot as plt
52 for time_serie, label in zip(time_series.T, labels):
53 plt.plot(time_serie, label=label)
54
55 plt.title('Default Mode Network Time Series')
56 plt.xlabel('Scan number')
57 plt.ylabel('Normalized signal')
58 plt.legend()
59 plt.tight_layout()
60
61
62 ##########################################################################
63 # Compute precision matrices
64 from sklearn.covariance import LedoitWolf
65 cve = LedoitWolf()
66 cve.fit(time_series)
67
68
69 ##########################################################################
70 # Display connectome
71 from nilearn import plotting
72
73 plotting.plot_connectome(cve.precision_, dmn_coords,
74 title="Default Mode Network Connectivity")
75
76 # Display connectome with hemispheric projections.
77 # Notice (0, -52, 18) is included in both hemispheres since x == 0.
78 title = "Connectivity projected on hemispheres"
79 plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
80 display_mode='lyrz')
81
82 plotting.show()
83
[end of examples/03_connectivity/plot_adhd_spheres.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py
--- a/examples/03_connectivity/plot_adhd_spheres.py
+++ b/examples/03_connectivity/plot_adhd_spheres.py
@@ -60,23 +60,25 @@
##########################################################################
-# Compute precision matrices
-from sklearn.covariance import LedoitWolf
-cve = LedoitWolf()
-cve.fit(time_series)
-
+# Compute partial correlation matrix using object
+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance
+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.
+from nilearn.connectome import ConnectivityMeasure
+connectivity_measure = ConnectivityMeasure(kind='partial correlation')
+partial_correlation_matrix = connectivity_measure.fit_transform(
+ [time_series])[0]
##########################################################################
# Display connectome
from nilearn import plotting
-plotting.plot_connectome(cve.precision_, dmn_coords,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
title="Default Mode Network Connectivity")
# Display connectome with hemispheric projections.
# Notice (0, -52, 18) is included in both hemispheres since x == 0.
title = "Connectivity projected on hemispheres"
-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,
display_mode='lyrz')
plotting.show()
| {"golden_diff": "diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py\n--- a/examples/03_connectivity/plot_adhd_spheres.py\n+++ b/examples/03_connectivity/plot_adhd_spheres.py\n@@ -60,23 +60,25 @@\n \n \n ##########################################################################\n-# Compute precision matrices\n-from sklearn.covariance import LedoitWolf\n-cve = LedoitWolf()\n-cve.fit(time_series)\n-\n+# Compute partial correlation matrix using object\n+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance\n+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.\n+from nilearn.connectome import ConnectivityMeasure\n+connectivity_measure = ConnectivityMeasure(kind='partial correlation')\n+partial_correlation_matrix = connectivity_measure.fit_transform(\n+ [time_series])[0]\n \n ##########################################################################\n # Display connectome\n from nilearn import plotting\n \n-plotting.plot_connectome(cve.precision_, dmn_coords,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n \n # Display connectome with hemispheric projections.\n # Notice (0, -52, 18) is included in both hemispheres since x == 0.\n title = \"Connectivity projected on hemispheres\"\n-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,\n display_mode='lyrz')\n \n plotting.show()\n", "issue": "sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres\n`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf`from `sklearn.covariance`.\nI also prefer using partial correlations rather than precision, because no need for negating the connections.\n\n", "before_files": [{"content": "\"\"\"\nExtracting brain signal from spheres\n====================================\n\nThis example extract brain signals from spheres described by the coordinates\nof their center in MNI space and a given radius in millimeters. 
In particular,\nthis example extracts signals from Default Mode Network regions and compute a\nconnectome from them.\n\n\"\"\"\n\n##########################################################################\n# Retrieve the dataset\nfrom nilearn import datasets\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n adhd_dataset.func[0]) # 4D data\n\n\n##########################################################################\n# Coordinates of Default Mode Network\ndmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]\nlabels = [\n 'Posterior Cingulate Cortex',\n 'Left Temporoparietal junction',\n 'Right Temporoparietal junction',\n 'Medial prefrontal cortex'\n]\n\n\n##########################################################################\n# Extracts signal from sphere around DMN seeds\nfrom nilearn import input_data\n\nmasker = input_data.NiftiSpheresMasker(\n dmn_coords, radius=8,\n detrend=True, standardize=True,\n low_pass=0.1, high_pass=0.01, t_r=2.5,\n memory='nilearn_cache', memory_level=1, verbose=2)\n\nfunc_filename = adhd_dataset.func[0]\nconfound_filename = adhd_dataset.confounds[0]\n\ntime_series = masker.fit_transform(func_filename,\n confounds=[confound_filename])\n\n##########################################################################\n# Display time series\nimport matplotlib.pyplot as plt\nfor time_serie, label in zip(time_series.T, labels):\n plt.plot(time_serie, label=label)\n\nplt.title('Default Mode Network Time Series')\nplt.xlabel('Scan number')\nplt.ylabel('Normalized signal')\nplt.legend()\nplt.tight_layout()\n\n\n##########################################################################\n# Compute precision matrices\nfrom sklearn.covariance import LedoitWolf\ncve = LedoitWolf()\ncve.fit(time_series)\n\n\n##########################################################################\n# Display connectome\nfrom nilearn import plotting\n\nplotting.plot_connectome(cve.precision_, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n\n# Display connectome with hemispheric projections.\n# Notice (0, -52, 18) is included in both hemispheres since x == 0.\ntitle = \"Connectivity projected on hemispheres\"\nplotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n display_mode='lyrz')\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_adhd_spheres.py"}]} | 1,359 | 342 |
gh_patches_debug_6697 | rasdani/github-patches | git_diff | SeldonIO__MLServer-911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with own logging configuration
Currently I have the problem that my logging configuration is not applied everywhere. As soon as the REST server starts (Uvicorn worker), my logging configuration is ignored. I have created a repo that reproduces my scenario and shows which configuration is used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers with their associated handlers and formatters, and from that it looks like the configuration should apply. Do you have any ideas?
Here is my small example repo: https://github.com/JustinDroege/mlserver-logging
</issue>
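
For context on the symptom: Uvicorn installs its own logging configuration unless it is handed one explicitly, so a server started without a `log_config` can override handlers the application has already set up. A hedged sketch of the idea, with an illustrative dictConfig rather than MLServer's actual settings plumbing:

```python
import uvicorn
from fastapi import FastAPI

app = FastAPI()

my_logging_dict = {  # illustrative stand-in for the user's own dictConfig
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"default": {"class": "logging.StreamHandler"}},
    "root": {"handlers": ["default"], "level": "INFO"},
}

config = uvicorn.Config(app, host="0.0.0.0", port=8080, log_config=my_logging_dict)
server = uvicorn.Server(config)
```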
<code>
[start of mlserver/metrics/server.py]
1 import uvicorn
2
3 from fastapi import FastAPI
4 from starlette_exporter import handle_metrics
5
6 from ..settings import Settings
7 from .logging import logger
8 from typing import Optional
9
10
11 class _NoSignalServer(uvicorn.Server):
12 def install_signal_handlers(self):
13 pass
14
15
16 class MetricsServer:
17 def __init__(self, settings: Settings):
18 self._settings = settings
19 self._app = self._get_app()
20
21 def _get_app(self):
22 app = FastAPI(debug=self._settings.debug)
23 app.add_route(self._settings.metrics_endpoint, handle_metrics)
24 return app
25
26 async def start(self):
27 cfg = self._get_config()
28 self._server = _NoSignalServer(cfg)
29
30 metrics_server = f"http://{self._settings.host}:{self._settings.metrics_port}"
31 logger.info(f"Metrics server running on {metrics_server}")
32 logger.info(
33 "Prometheus scraping endpoint can be accessed on "
34 f"{metrics_server}{self._settings.metrics_endpoint}"
35 )
36 await self._server.serve()
37
38 def _get_config(self):
39 kwargs = {}
40
41 if self._settings._custom_metrics_server_settings:
42 logger.warning(
43 "REST custom configuration is out of support. Use as your own risk"
44 )
45 kwargs.update(self._settings._custom_metrics_server_settings)
46
47 kwargs.update(
48 {
49 "host": self._settings.host,
50 "port": self._settings.metrics_port,
51 "access_log": self._settings.debug,
52 }
53 )
54
55 # TODO: we want to disable logger unless debug is enabled (otherwise,
56 # prom reqs can be spammy)
57 return uvicorn.Config(self._app, **kwargs)
58
59 async def stop(self, sig: Optional[int] = None):
60 self._server.handle_exit(sig=sig, frame=None)
61
[end of mlserver/metrics/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py
--- a/mlserver/metrics/server.py
+++ b/mlserver/metrics/server.py
@@ -52,8 +52,11 @@
}
)
- # TODO: we want to disable logger unless debug is enabled (otherwise,
- # prom reqs can be spammy)
+ if self._settings.logging_settings:
+ # If not None, use ours. Otherwise, let Uvicorn fall back on its
+ # own config.
+ kwargs.update({"log_config": self._settings.logging_settings})
+
return uvicorn.Config(self._app, **kwargs)
async def stop(self, sig: Optional[int] = None):
| {"golden_diff": "diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py\n--- a/mlserver/metrics/server.py\n+++ b/mlserver/metrics/server.py\n@@ -52,8 +52,11 @@\n }\n )\n \n- # TODO: we want to disable logger unless debug is enabled (otherwise,\n- # prom reqs can be spammy)\n+ if self._settings.logging_settings:\n+ # If not None, use ours. Otherwise, let Uvicorn fall back on its\n+ # own config.\n+ kwargs.update({\"log_config\": self._settings.logging_settings})\n+\n return uvicorn.Config(self._app, **kwargs)\n \n async def stop(self, sig: Optional[int] = None):\n", "issue": "Problems with own logging configuration\nCurrently I have the problem that my logging configuration is not accepted everywhere. As soon as the REST server starts (Uvicorn Worker), my logging configuration is ignored. I have created a repo that represents my scenario and also which is configuration used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers and the associated handlers and formatter and can see here that it should actually fit. Do you have any ideas?\r\n\r\nHere is my small example repo: https://github.com/JustinDroege/mlserver-logging\n", "before_files": [{"content": "import uvicorn\n\nfrom fastapi import FastAPI\nfrom starlette_exporter import handle_metrics\n\nfrom ..settings import Settings\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass MetricsServer:\n def __init__(self, settings: Settings):\n self._settings = settings\n self._app = self._get_app()\n\n def _get_app(self):\n app = FastAPI(debug=self._settings.debug)\n app.add_route(self._settings.metrics_endpoint, handle_metrics)\n return app\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n\n metrics_server = f\"http://{self._settings.host}:{self._settings.metrics_port}\"\n logger.info(f\"Metrics server running on {metrics_server}\")\n logger.info(\n \"Prometheus scraping endpoint can be accessed on \"\n f\"{metrics_server}{self._settings.metrics_endpoint}\"\n )\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_metrics_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_metrics_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.metrics_port,\n \"access_log\": self._settings.debug,\n }\n )\n\n # TODO: we want to disable logger unless debug is enabled (otherwise,\n # prom reqs can be spammy)\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/metrics/server.py"}]} | 1,161 | 161 |
gh_patches_debug_38993 | rasdani/github-patches | git_diff | zulip__zulip-29641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update first message ID when first message is deleted
When a message is deleted, we should update the stored ID of the first message in the stream. Because we currently do not, deleting the first message may result in an extraneous "more topics" link in the left sidebar, with no additional topics shown when you click it.
Note: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.
</issue>
<code>
[start of zerver/actions/message_delete.py]
1 from typing import Iterable, List, TypedDict
2
3 from zerver.lib import retention
4 from zerver.lib.retention import move_messages_to_archive
5 from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
6 from zerver.models import Message, Realm, UserMessage, UserProfile
7 from zerver.tornado.django_api import send_event_on_commit
8
9
10 class DeleteMessagesEvent(TypedDict, total=False):
11 type: str
12 message_ids: List[int]
13 message_type: str
14 topic: str
15 stream_id: int
16
17
18 def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
19 # messages in delete_message event belong to the same topic
20 # or is a single direct message, as any other behaviour is not possible with
21 # the current callers to this method.
22 messages = list(messages)
23 message_ids = [message.id for message in messages]
24 if not message_ids:
25 return
26
27 event: DeleteMessagesEvent = {
28 "type": "delete_message",
29 "message_ids": message_ids,
30 }
31
32 sample_message = messages[0]
33 message_type = "stream"
34 users_to_notify = []
35 if not sample_message.is_stream_message():
36 assert len(messages) == 1
37 message_type = "private"
38 ums = UserMessage.objects.filter(message_id__in=message_ids)
39 users_to_notify = [um.user_profile_id for um in ums]
40 archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
41
42 if message_type == "stream":
43 stream_id = sample_message.recipient.type_id
44 event["stream_id"] = stream_id
45 event["topic"] = sample_message.topic_name()
46 subscriptions = get_active_subscriptions_for_stream_id(
47 stream_id, include_deactivated_users=False
48 )
49 # We exclude long-term idle users, since they by definition have no active clients.
50 subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
51 users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
52 archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
53
54 move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
55
56 event["message_type"] = message_type
57 send_event_on_commit(realm, event, users_to_notify)
58
59
60 def do_delete_messages_by_sender(user: UserProfile) -> None:
61 message_ids = list(
62 # Uses index: zerver_message_realm_sender_recipient (prefix)
63 Message.objects.filter(realm_id=user.realm_id, sender=user)
64 .values_list("id", flat=True)
65 .order_by("id")
66 )
67 if message_ids:
68 move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
69
[end of zerver/actions/message_delete.py]
[start of version.py]
1 import os
2
3 ZULIP_VERSION = "9.0-dev+git"
4
5 # Add information on number of commits and commit hash to version, if available
6 zulip_git_version_file = os.path.join(
7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
8 )
9 lines = [ZULIP_VERSION, ""]
10 if os.path.exists(zulip_git_version_file):
11 with open(zulip_git_version_file) as f:
12 lines = [*f, "", ""]
13 ZULIP_VERSION = lines.pop(0).strip()
14 ZULIP_MERGE_BASE = lines.pop(0).strip()
15
16 LATEST_MAJOR_VERSION = "8.0"
17 LATEST_RELEASE_VERSION = "8.3"
18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2023/12/15/zulip-8-0-released/"
19
20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
21 # prevented from connecting to the Zulip server. Versions above
22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
23 # a banner at the top of the page asking the user to upgrade.
24 DESKTOP_MINIMUM_VERSION = "5.4.3"
25 DESKTOP_WARNING_VERSION = "5.9.3"
26
27 # Bump the API_FEATURE_LEVEL whenever an API change is made
28 # that clients might want to condition on. If we forget at
29 # the time we make the change, then bump it later as soon
30 # as we notice; clients using API_FEATURE_LEVEL will just not
31 # use the new feature/API until the bump.
32 #
33 # Changes should be accompanied by documentation explaining what the
34 # new level means in api_docs/changelog.md, as well as "**Changes**"
35 # entries in the endpoint's documentation in `zulip.yaml`.
36 API_FEATURE_LEVEL = 255
37
38 # Bump the minor PROVISION_VERSION to indicate that folks should provision
39 # only when going from an old version of the code to a newer version. Bump
40 # the major version to indicate that folks should provision in both
41 # directions.
42
43 # Typically,
44 # * adding a dependency only requires a minor version bump;
45 # * removing a dependency requires a major version bump;
46 # * upgrading a dependency requires a major version bump, unless the
47 # upgraded dependency is backwards compatible with all of our
48 # historical commits sharing the same major version, in which case a
49 # minor version bump suffices.
50
51 PROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore
52
[end of version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in api_docs/changelog.md, as well as "**Changes**"
# entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 255
+API_FEATURE_LEVEL = 256
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py
--- a/zerver/actions/message_delete.py
+++ b/zerver/actions/message_delete.py
@@ -3,7 +3,7 @@
from zerver.lib import retention
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
-from zerver.models import Message, Realm, UserMessage, UserProfile
+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile
from zerver.tornado.django_api import send_event_on_commit
@@ -15,6 +15,34 @@
stream_id: int
+def check_update_first_message_id(
+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]
+) -> None:
+ # This will not update the `first_message_id` of streams where the
+ # first message was deleted prior to the implementation of this function.
+ assert stream.recipient_id is not None
+ if stream.first_message_id not in message_ids:
+ return
+ current_first_message_id = (
+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)
+ .values_list("id", flat=True)
+ .order_by("id")
+ .first()
+ )
+
+ stream.first_message_id = current_first_message_id
+ stream.save(update_fields=["first_message_id"])
+
+ stream_event = dict(
+ type="stream",
+ op="update",
+ property="first_message_id",
+ value=stream.first_message_id,
+ stream_id=stream.id,
+ )
+ send_event_on_commit(realm, stream_event, users_to_notify)
+
+
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# messages in delete_message event belong to the same topic
# or is a single direct message, as any other behaviour is not possible with
@@ -52,6 +80,9 @@
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
+ if message_type == "stream":
+ stream = Stream.objects.get(id=sample_message.recipient.type_id)
+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)
event["message_type"] = message_type
send_event_on_commit(realm, event, users_to_notify)
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -33,7 +33,7 @@\n # Changes should be accompanied by documentation explaining what the\n # new level means in api_docs/changelog.md, as well as \"**Changes**\"\n # entries in the endpoint's documentation in `zulip.yaml`.\n-API_FEATURE_LEVEL = 255\n+API_FEATURE_LEVEL = 256\n \n # Bump the minor PROVISION_VERSION to indicate that folks should provision\n # only when going from an old version of the code to a newer version. Bump\ndiff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py\n--- a/zerver/actions/message_delete.py\n+++ b/zerver/actions/message_delete.py\n@@ -3,7 +3,7 @@\n from zerver.lib import retention\n from zerver.lib.retention import move_messages_to_archive\n from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\n-from zerver.models import Message, Realm, UserMessage, UserProfile\n+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile\n from zerver.tornado.django_api import send_event_on_commit\n \n \n@@ -15,6 +15,34 @@\n stream_id: int\n \n \n+def check_update_first_message_id(\n+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]\n+) -> None:\n+ # This will not update the `first_message_id` of streams where the\n+ # first message was deleted prior to the implementation of this function.\n+ assert stream.recipient_id is not None\n+ if stream.first_message_id not in message_ids:\n+ return\n+ current_first_message_id = (\n+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)\n+ .values_list(\"id\", flat=True)\n+ .order_by(\"id\")\n+ .first()\n+ )\n+\n+ stream.first_message_id = current_first_message_id\n+ stream.save(update_fields=[\"first_message_id\"])\n+\n+ stream_event = dict(\n+ type=\"stream\",\n+ op=\"update\",\n+ property=\"first_message_id\",\n+ value=stream.first_message_id,\n+ stream_id=stream.id,\n+ )\n+ send_event_on_commit(realm, stream_event, users_to_notify)\n+\n+\n def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n@@ -52,6 +80,9 @@\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n \n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n+ if message_type == \"stream\":\n+ stream = Stream.objects.get(id=sample_message.recipient.type_id)\n+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)\n \n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n", "issue": "Update first message ID when first message is deleted\nWhen a message is deleted, we should update the stored ID of the first message in the stream. 
Because we currently do not, deleting the first message may result in an extraneous \"more topics\" link in the left sidebar, with no additional topics shown when you click it.\r\n\r\nNote: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.\n", "before_files": [{"content": "from typing import Iterable, List, TypedDict\n\nfrom zerver.lib import retention\nfrom zerver.lib.retention import move_messages_to_archive\nfrom zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\nfrom zerver.models import Message, Realm, UserMessage, UserProfile\nfrom zerver.tornado.django_api import send_event_on_commit\n\n\nclass DeleteMessagesEvent(TypedDict, total=False):\n type: str\n message_ids: List[int]\n message_type: str\n topic: str\n stream_id: int\n\n\ndef do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n # the current callers to this method.\n messages = list(messages)\n message_ids = [message.id for message in messages]\n if not message_ids:\n return\n\n event: DeleteMessagesEvent = {\n \"type\": \"delete_message\",\n \"message_ids\": message_ids,\n }\n\n sample_message = messages[0]\n message_type = \"stream\"\n users_to_notify = []\n if not sample_message.is_stream_message():\n assert len(messages) == 1\n message_type = \"private\"\n ums = UserMessage.objects.filter(message_id__in=message_ids)\n users_to_notify = [um.user_profile_id for um in ums]\n archiving_chunk_size = retention.MESSAGE_BATCH_SIZE\n\n if message_type == \"stream\":\n stream_id = sample_message.recipient.type_id\n event[\"stream_id\"] = stream_id\n event[\"topic\"] = sample_message.topic_name()\n subscriptions = get_active_subscriptions_for_stream_id(\n stream_id, include_deactivated_users=False\n )\n # We exclude long-term idle users, since they by definition have no active clients.\n subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)\n users_to_notify = list(subscriptions.values_list(\"user_profile_id\", flat=True))\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n\n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n\n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n\n\ndef do_delete_messages_by_sender(user: UserProfile) -> None:\n message_ids = list(\n # Uses index: zerver_message_realm_sender_recipient (prefix)\n Message.objects.filter(realm_id=user.realm_id, sender=user)\n .values_list(\"id\", flat=True)\n .order_by(\"id\")\n )\n if message_ids:\n move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)\n", "path": "zerver/actions/message_delete.py"}, {"content": "import os\n\nZULIP_VERSION = \"9.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"8.0\"\nLATEST_RELEASE_VERSION = \"8.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/12/15/zulip-8-0-released/\"\n\n# Versions of the desktop app below 
DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 255\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore\n", "path": "version.py"}]} | 2,019 | 678 |
gh_patches_debug_26011 | rasdani/github-patches | git_diff | ray-project__ray-3711 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tune[ partial function cannot be registered as trainable
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.6.1
- **Python version**: 3.7
- **Exact command to reproduce**:
The following code fails:
```
def dummy_fn(c, a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", partial(dummy_fn, c=None))
```
while the following code works:
```
def dummy_fn(a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", dummy_fn)
```
### Describe the problem
The first code sample does not work, despite the function (after the `partial`) fullfills all requirements to be properly registered.
### Source code / logs
Traceback:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py", line 35, in register_trainable
if not issubclass(trainable, Trainable):
TypeError: issubclass() arg 1 must be a class
```
</issue>
<code>
[start of python/ray/tune/registry.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 from types import FunctionType
6
7 import ray
8 import ray.cloudpickle as pickle
9 from ray.experimental.internal_kv import _internal_kv_initialized, \
10 _internal_kv_get, _internal_kv_put
11
12 TRAINABLE_CLASS = "trainable_class"
13 ENV_CREATOR = "env_creator"
14 RLLIB_MODEL = "rllib_model"
15 RLLIB_PREPROCESSOR = "rllib_preprocessor"
16 KNOWN_CATEGORIES = [
17 TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
18 ]
19
20
21 def register_trainable(name, trainable):
22 """Register a trainable function or class.
23
24 Args:
25 name (str): Name to register.
26 trainable (obj): Function or tune.Trainable class. Functions must
27 take (config, status_reporter) as arguments and will be
28 automatically converted into a class during registration.
29 """
30
31 from ray.tune.trainable import Trainable, wrap_function
32
33 if isinstance(trainable, FunctionType):
34 trainable = wrap_function(trainable)
35 if not issubclass(trainable, Trainable):
36 raise TypeError("Second argument must be convertable to Trainable",
37 trainable)
38 _global_registry.register(TRAINABLE_CLASS, name, trainable)
39
40
41 def register_env(name, env_creator):
42 """Register a custom environment for use with RLlib.
43
44 Args:
45 name (str): Name to register.
46 env_creator (obj): Function that creates an env.
47 """
48
49 if not isinstance(env_creator, FunctionType):
50 raise TypeError("Second argument must be a function.", env_creator)
51 _global_registry.register(ENV_CREATOR, name, env_creator)
52
53
54 def _make_key(category, key):
55 """Generate a binary key for the given category and key.
56
57 Args:
58 category (str): The category of the item
59 key (str): The unique identifier for the item
60
61 Returns:
62 The key to use for storing a the value.
63 """
64 return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
65 key.encode("ascii"))
66
67
68 class _Registry(object):
69 def __init__(self):
70 self._to_flush = {}
71
72 def register(self, category, key, value):
73 if category not in KNOWN_CATEGORIES:
74 from ray.tune import TuneError
75 raise TuneError("Unknown category {} not among {}".format(
76 category, KNOWN_CATEGORIES))
77 self._to_flush[(category, key)] = pickle.dumps(value)
78 if _internal_kv_initialized():
79 self.flush_values()
80
81 def contains(self, category, key):
82 if _internal_kv_initialized():
83 value = _internal_kv_get(_make_key(category, key))
84 return value is not None
85 else:
86 return (category, key) in self._to_flush
87
88 def get(self, category, key):
89 if _internal_kv_initialized():
90 value = _internal_kv_get(_make_key(category, key))
91 if value is None:
92 raise ValueError(
93 "Registry value for {}/{} doesn't exist.".format(
94 category, key))
95 return pickle.loads(value)
96 else:
97 return pickle.loads(self._to_flush[(category, key)])
98
99 def flush_values(self):
100 for (category, key), value in self._to_flush.items():
101 _internal_kv_put(_make_key(category, key), value, overwrite=True)
102 self._to_flush.clear()
103
104
105 _global_registry = _Registry()
106 ray.worker._post_init_hooks.append(_global_registry.flush_values)
107
[end of python/ray/tune/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py
--- a/python/ray/tune/registry.py
+++ b/python/ray/tune/registry.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function
+import logging
from types import FunctionType
import ray
@@ -17,6 +18,8 @@
TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
]
+logger = logging.getLogger(__name__)
+
def register_trainable(name, trainable):
"""Register a trainable function or class.
@@ -30,8 +33,16 @@
from ray.tune.trainable import Trainable, wrap_function
- if isinstance(trainable, FunctionType):
+ if isinstance(trainable, type):
+ logger.debug("Detected class for trainable.")
+ elif isinstance(trainable, FunctionType):
+ logger.debug("Detected function for trainable.")
+ trainable = wrap_function(trainable)
+ elif callable(trainable):
+ logger.warning(
+ "Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable)
+
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertable to Trainable",
trainable)
| {"golden_diff": "diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py\n--- a/python/ray/tune/registry.py\n+++ b/python/ray/tune/registry.py\n@@ -2,6 +2,7 @@\n from __future__ import division\n from __future__ import print_function\n \n+import logging\n from types import FunctionType\n \n import ray\n@@ -17,6 +18,8 @@\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n ]\n \n+logger = logging.getLogger(__name__)\n+\n \n def register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n@@ -30,8 +33,16 @@\n \n from ray.tune.trainable import Trainable, wrap_function\n \n- if isinstance(trainable, FunctionType):\n+ if isinstance(trainable, type):\n+ logger.debug(\"Detected class for trainable.\")\n+ elif isinstance(trainable, FunctionType):\n+ logger.debug(\"Detected function for trainable.\")\n+ trainable = wrap_function(trainable)\n+ elif callable(trainable):\n+ logger.warning(\n+ \"Detected unknown callable for trainable. Converting to class.\")\n trainable = wrap_function(trainable)\n+\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n", "issue": "[tune[ partial function cannot be registered as trainable\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.6.1\r\n- **Python version**: 3.7\r\n- **Exact command to reproduce**:\r\n\r\nThe following code fails:\r\n```\r\ndef dummy_fn(c, a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", partial(dummy_fn, c=None))\r\n\r\n```\r\n\r\nwhile the following code works:\r\n```\r\ndef dummy_fn(a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", dummy_fn)\r\n\r\n```\r\n### Describe the problem\r\nThe first code sample does not work, despite the function (after the `partial`) fullfills all requirements to be properly registered.\r\n\r\n### Source code / logs\r\nTraceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py\", line 35, in register_trainable\r\n if not issubclass(trainable, Trainable):\r\nTypeError: issubclass() arg 1 must be a class\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom types import FunctionType\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.experimental.internal_kv import _internal_kv_initialized, \\\n _internal_kv_get, _internal_kv_put\n\nTRAINABLE_CLASS = \"trainable_class\"\nENV_CREATOR = \"env_creator\"\nRLLIB_MODEL = \"rllib_model\"\nRLLIB_PREPROCESSOR = \"rllib_preprocessor\"\nKNOWN_CATEGORIES = [\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n]\n\n\ndef register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n\n Args:\n name (str): Name to register.\n trainable (obj): Function or tune.Trainable class. 
Functions must\n take (config, status_reporter) as arguments and will be\n automatically converted into a class during registration.\n \"\"\"\n\n from ray.tune.trainable import Trainable, wrap_function\n\n if isinstance(trainable, FunctionType):\n trainable = wrap_function(trainable)\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n _global_registry.register(TRAINABLE_CLASS, name, trainable)\n\n\ndef register_env(name, env_creator):\n \"\"\"Register a custom environment for use with RLlib.\n\n Args:\n name (str): Name to register.\n env_creator (obj): Function that creates an env.\n \"\"\"\n\n if not isinstance(env_creator, FunctionType):\n raise TypeError(\"Second argument must be a function.\", env_creator)\n _global_registry.register(ENV_CREATOR, name, env_creator)\n\n\ndef _make_key(category, key):\n \"\"\"Generate a binary key for the given category and key.\n\n Args:\n category (str): The category of the item\n key (str): The unique identifier for the item\n\n Returns:\n The key to use for storing a the value.\n \"\"\"\n return (b\"TuneRegistry:\" + category.encode(\"ascii\") + b\"/\" +\n key.encode(\"ascii\"))\n\n\nclass _Registry(object):\n def __init__(self):\n self._to_flush = {}\n\n def register(self, category, key, value):\n if category not in KNOWN_CATEGORIES:\n from ray.tune import TuneError\n raise TuneError(\"Unknown category {} not among {}\".format(\n category, KNOWN_CATEGORIES))\n self._to_flush[(category, key)] = pickle.dumps(value)\n if _internal_kv_initialized():\n self.flush_values()\n\n def contains(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n return value is not None\n else:\n return (category, key) in self._to_flush\n\n def get(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n if value is None:\n raise ValueError(\n \"Registry value for {}/{} doesn't exist.\".format(\n category, key))\n return pickle.loads(value)\n else:\n return pickle.loads(self._to_flush[(category, key)])\n\n def flush_values(self):\n for (category, key), value in self._to_flush.items():\n _internal_kv_put(_make_key(category, key), value, overwrite=True)\n self._to_flush.clear()\n\n\n_global_registry = _Registry()\nray.worker._post_init_hooks.append(_global_registry.flush_values)\n", "path": "python/ray/tune/registry.py"}]} | 1,834 | 297 |
gh_patches_debug_20771 | rasdani/github-patches | git_diff | cupy__cupy-7068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cupy.apply_along_axis failed with cupy.nonzero
### Description
cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]])) - failed with error
*** AttributeError: 'tuple' object has no attribute 'shape'
np.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]])) - is OK
UPDATE. Problem in _shape_base.py.
line 53:
buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
res - is tuple (as a result of cp.nonzero(1d array) of single cupy-array, so line 44 ( if cupy.isscalar(res):) doesnt convert it from tuple to cupy-array
as a temporal solution is possible to use "buffer-like" function
def cupy_nonzero (a):
return cp.nonzero(a)[0]
### To Reproduce
```py
cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))
```
### Installation
_No response_
### Environment
```
# Paste the output here
```
### Additional Information
_No response_
</issue>
<code>
[start of cupy/lib/_shape_base.py]
1 from numpy.lib import index_tricks
2
3 import cupy
4 from cupy._core import internal
5
6
7 def apply_along_axis(func1d, axis, arr, *args, **kwargs):
8 """Apply a function to 1-D slices along the given axis.
9
10 Args:
11 func1d (function (M,) -> (Nj...)): This function should accept 1-D
12 arrays. It is applied to 1-D slices of ``arr`` along the specified
13 axis. It must return a 1-D ``cupy.ndarray``.
14 axis (integer): Axis along which ``arr`` is sliced.
15 arr (cupy.ndarray (Ni..., M, Nk...)): Input array.
16 args: Additional arguments for ``func1d``.
17 kwargs: Additional keyword arguments for ``func1d``.
18
19 Returns:
20 cupy.ndarray: The output array. The shape of ``out`` is identical to
21 the shape of ``arr``, except along the ``axis`` dimension. This
22 axis is removed, and replaced with new dimensions equal to the
23 shape of the return value of ``func1d``. So if ``func1d`` returns a
24 scalar ``out`` will have one fewer dimensions than ``arr``.
25
26 .. seealso:: :func:`numpy.apply_along_axis`
27 """
28 ndim = arr.ndim
29 axis = internal._normalize_axis_index(axis, ndim)
30 inarr_view = cupy.moveaxis(arr, axis, -1)
31
32 # compute indices for the iteration axes, and append a trailing ellipsis to
33 # prevent 0d arrays decaying to scalars
34 inds = index_tricks.ndindex(inarr_view.shape[:-1])
35 inds = (ind + (Ellipsis,) for ind in inds)
36
37 # invoke the function on the first item
38 try:
39 ind0 = next(inds)
40 except StopIteration:
41 raise ValueError(
42 'Cannot apply_along_axis when any iteration dimensions are 0'
43 )
44 res = func1d(inarr_view[ind0], *args, **kwargs)
45 if cupy.isscalar(res):
46 # scalar outputs need to be transfered to a device ndarray
47 res = cupy.asarray(res)
48
49 # build a buffer for storing evaluations of func1d.
50 # remove the requested axis, and add the new ones on the end.
51 # laid out so that each write is contiguous.
52 # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
53 buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
54
55 # save the first result, then compute and save all remaining results
56 buff[ind0] = res
57 for ind in inds:
58 buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
59
60 # restore the inserted axes back to where they belong
61 for i in range(res.ndim):
62 buff = cupy.moveaxis(buff, -1, axis)
63
64 return buff
65
[end of cupy/lib/_shape_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py
--- a/cupy/lib/_shape_base.py
+++ b/cupy/lib/_shape_base.py
@@ -42,9 +42,7 @@
'Cannot apply_along_axis when any iteration dimensions are 0'
)
res = func1d(inarr_view[ind0], *args, **kwargs)
- if cupy.isscalar(res):
- # scalar outputs need to be transfered to a device ndarray
- res = cupy.asarray(res)
+ res = cupy.asarray(res)
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
@@ -55,7 +53,8 @@
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
+ out = func1d(inarr_view[ind], *args, **kwargs)
+ buff[ind] = cupy.asarray(out)
# restore the inserted axes back to where they belong
for i in range(res.ndim):
| {"golden_diff": "diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py\n--- a/cupy/lib/_shape_base.py\n+++ b/cupy/lib/_shape_base.py\n@@ -42,9 +42,7 @@\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n- if cupy.isscalar(res):\n- # scalar outputs need to be transfered to a device ndarray\n- res = cupy.asarray(res)\n+ res = cupy.asarray(res)\n \n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n@@ -55,7 +53,8 @@\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n+ out = func1d(inarr_view[ind], *args, **kwargs)\n+ buff[ind] = cupy.asarray(out)\n \n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n", "issue": "cupy.apply_along_axis failed with cupy.nonzero\n### Description\r\n\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]])) - failed with error\r\n\r\n*** AttributeError: 'tuple' object has no attribute 'shape'\r\n\r\nnp.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]])) - is OK\r\n\r\nUPDATE. Problem in _shape_base.py. \r\nline 53:\r\nbuff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\r\n\r\nres - is tuple (as a result of cp.nonzero(1d array) of single cupy-array, so line 44 ( if cupy.isscalar(res):) doesnt convert it from tuple to cupy-array\r\n\r\nas a temporal solution is possible to use \"buffer-like\" function\r\ndef cupy_nonzero (a):\r\n return cp.nonzero(a)[0]\r\n\r\n### To Reproduce\r\n\r\n```py\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))\r\n```\r\n\r\n\r\n### Installation\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```\r\n# Paste the output here\r\n```\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\n", "before_files": [{"content": "from numpy.lib import index_tricks\n\nimport cupy\nfrom cupy._core import internal\n\n\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"Apply a function to 1-D slices along the given axis.\n\n Args:\n func1d (function (M,) -> (Nj...)): This function should accept 1-D\n arrays. It is applied to 1-D slices of ``arr`` along the specified\n axis. It must return a 1-D ``cupy.ndarray``.\n axis (integer): Axis along which ``arr`` is sliced.\n arr (cupy.ndarray (Ni..., M, Nk...)): Input array.\n args: Additional arguments for ``func1d``.\n kwargs: Additional keyword arguments for ``func1d``.\n\n Returns:\n cupy.ndarray: The output array. The shape of ``out`` is identical to\n the shape of ``arr``, except along the ``axis`` dimension. This\n axis is removed, and replaced with new dimensions equal to the\n shape of the return value of ``func1d``. So if ``func1d`` returns a\n scalar ``out`` will have one fewer dimensions than ``arr``.\n\n .. 
seealso:: :func:`numpy.apply_along_axis`\n \"\"\"\n ndim = arr.ndim\n axis = internal._normalize_axis_index(axis, ndim)\n inarr_view = cupy.moveaxis(arr, axis, -1)\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars\n inds = index_tricks.ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError(\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n if cupy.isscalar(res):\n # scalar outputs need to be transfered to a device ndarray\n res = cupy.asarray(res)\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n\n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n buff = cupy.moveaxis(buff, -1, axis)\n\n return buff\n", "path": "cupy/lib/_shape_base.py"}]} | 1,571 | 272 |
gh_patches_debug_27156 | rasdani/github-patches | git_diff | falconry__falcon-364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not all modules are cythonized
Missing some modules, such as hooks and those in the util package.
</issue>
<code>
[start of setup.py]
1 import imp
2 import io
3 import sys
4 from os import path
5 from setuptools import setup, find_packages, Extension
6
7 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
8 VERSION = VERSION.__version__
9
10 # NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3
11 # TODO(kgriffs): Fork and optimize/modernize python-mimeparse
12 REQUIRES = ['six', 'python-mimeparse']
13
14 PYPY = True
15 CYTHON = False
16 try:
17 sys.pypy_version_info
18 except AttributeError:
19 PYPY = False
20
21 if not PYPY:
22 try:
23 from Cython.Distutils import build_ext
24 CYTHON = True
25 except ImportError:
26 print('\nWARNING: Cython not installed. '
27 'Falcon will still work fine, but may run '
28 'a bit slower.\n')
29 CYTHON = False
30
31 if CYTHON:
32 ext_names = (
33 'api',
34 'api_helpers',
35 'errors',
36 'http_error',
37 'request',
38 'request_helpers',
39 'responders',
40 'response',
41 'response_helpers',
42 )
43
44 cmdclass = {'build_ext': build_ext}
45 ext_modules = [
46 Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
47 for ext in ext_names]
48 else:
49 cmdclass = {}
50 ext_modules = []
51
52 setup(
53 name='falcon',
54 version=VERSION,
55 description='An unladen web framework for building APIs and app backends.',
56 long_description=io.open('README.rst', 'r', encoding='utf-8').read(),
57 classifiers=[
58 'Development Status :: 5 - Production/Stable',
59 'Environment :: Web Environment',
60 'Natural Language :: English',
61 'Intended Audience :: Developers',
62 'Intended Audience :: System Administrators',
63 'License :: OSI Approved :: Apache Software License',
64 'Operating System :: MacOS :: MacOS X',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Topic :: Internet :: WWW/HTTP :: WSGI',
68 'Topic :: Software Development :: Libraries :: Application Frameworks',
69 'Programming Language :: Python',
70 'Programming Language :: Python :: Implementation :: CPython',
71 'Programming Language :: Python :: Implementation :: PyPy',
72 'Programming Language :: Python :: 2.6',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3.3',
75 'Programming Language :: Python :: 3.4',
76 ],
77 keywords='wsgi web api framework rest http cloud',
78 author='Kurt Griffiths',
79 author_email='[email protected]',
80 url='http://falconframework.org',
81 license='Apache 2.0',
82 packages=find_packages(exclude=['tests']),
83 include_package_data=True,
84 zip_safe=False,
85 install_requires=REQUIRES,
86 setup_requires=[],
87 cmdclass=cmdclass,
88 ext_modules=ext_modules,
89 test_suite='nose.collector',
90 entry_points={
91 'console_scripts': [
92 'falcon-bench = falcon.cmd.bench:main'
93 ]
94 }
95 )
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,12 @@
+import glob
import imp
import io
-import sys
+import os
from os import path
from setuptools import setup, find_packages, Extension
+import sys
+
+MYDIR = path.abspath(os.path.dirname(__file__))
VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
VERSION = VERSION.__version__
@@ -29,22 +33,29 @@
CYTHON = False
if CYTHON:
- ext_names = (
- 'api',
- 'api_helpers',
- 'errors',
- 'http_error',
- 'request',
- 'request_helpers',
- 'responders',
- 'response',
- 'response_helpers',
- )
+ def list_modules(dirname):
+ filenames = glob.glob(path.join(dirname, '*.py'))
+
+ module_names = []
+ for name in filenames:
+ module, ext = path.splitext(path.basename(name))
+ if module != '__init__':
+ module_names.append(module)
+
+ return module_names
- cmdclass = {'build_ext': build_ext}
ext_modules = [
Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
- for ext in ext_names]
+ for ext in list_modules(path.join(MYDIR, 'falcon'))]
+
+ ext_modules += [
+ Extension('falcon.util.' + ext,
+ [path.join('falcon', 'util', ext + '.py')])
+
+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
+
+ cmdclass = {'build_ext': build_ext}
+
else:
cmdclass = {}
ext_modules = []
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,12 @@\n+import glob\n import imp\n import io\n-import sys\n+import os\n from os import path\n from setuptools import setup, find_packages, Extension\n+import sys\n+\n+MYDIR = path.abspath(os.path.dirname(__file__))\n \n VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\n VERSION = VERSION.__version__\n@@ -29,22 +33,29 @@\n CYTHON = False\n \n if CYTHON:\n- ext_names = (\n- 'api',\n- 'api_helpers',\n- 'errors',\n- 'http_error',\n- 'request',\n- 'request_helpers',\n- 'responders',\n- 'response',\n- 'response_helpers',\n- )\n+ def list_modules(dirname):\n+ filenames = glob.glob(path.join(dirname, '*.py'))\n+\n+ module_names = []\n+ for name in filenames:\n+ module, ext = path.splitext(path.basename(name))\n+ if module != '__init__':\n+ module_names.append(module)\n+\n+ return module_names\n \n- cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n- for ext in ext_names]\n+ for ext in list_modules(path.join(MYDIR, 'falcon'))]\n+\n+ ext_modules += [\n+ Extension('falcon.util.' + ext,\n+ [path.join('falcon', 'util', ext + '.py')])\n+\n+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n+\n+ cmdclass = {'build_ext': build_ext}\n+\n else:\n cmdclass = {}\n ext_modules = []\n", "issue": "Not all modules are cythonized\nMissing some modules, such as hooks and those in the util package.\n\n", "before_files": [{"content": "import imp\nimport io\nimport sys\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six', 'python-mimeparse']\n\nPYPY = True\nCYTHON = False\ntry:\n sys.pypy_version_info\nexcept AttributeError:\n PYPY = False\n\nif not PYPY:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n print('\\nWARNING: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n ext_names = (\n 'api',\n 'api_helpers',\n 'errors',\n 'http_error',\n 'request',\n 'request_helpers',\n 'responders',\n 'response',\n 'response_helpers',\n )\n\n cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' 
+ ext, [path.join('falcon', ext + '.py')])\n for ext in ext_names]\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 1,425 | 405 |
gh_patches_debug_286 | rasdani/github-patches | git_diff | Mailu__Mailu-2049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fetchmail: /var/lib/fetchmail needs persistence
According [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file persistent over container restarts?
I'm not a Fetchmail user, perhaps somebody can shine a light on how this currently works?
cc: @Nebukadneza, @hoellen, @kaiyou
</issue>
<code>
[start of optional/fetchmail/fetchmail.py]
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
[end of optional/fetchmail/fetchmail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -13,6 +13,7 @@
FETCHMAIL = """
fetchmail -N \
+ --idfile /data/fetchids --uidl \
--sslcertck --sslcertpath /etc/ssl/certs \
-f {}
"""
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -13,6 +13,7 @@\n \n FETCHMAIL = \"\"\"\n fetchmail -N \\\n+ --idfile /data/fetchids --uidl \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n \"\"\"\n", "issue": "Fetchmail: /var/lib/fetchmail needs persistence\nAccording [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file persistent over container restarts?\r\n\r\nI'm not a Fetchmail user, perhaps somebody can shine a light on how this currently works?\r\n\r\ncc: @Nebukadneza, @hoellen, @kaiyou \n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n 
run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,600 | 106 |
gh_patches_debug_18158 | rasdani/github-patches | git_diff | openai__gym-1966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in PixelObservationWrapper
In the pixel_observation.py, we have a bit of code that looks like this:
```
def _add_pixel_observation(self, observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
observation = type(observation)(observation)
else:
observation = collections.OrderedDict()
observation[STATE_KEY] = observation
```
If you note, the argument `observation` is being clobbered in the else case, so now the observation dictionary at the STATE_KEY refers to itself instead of the underlying env's observation.
I'm happy to fix this and submit a pull request but I wanted to raise the community's attention to this first.
</issue>
<code>
[start of gym/wrappers/pixel_observation.py]
1 """An observation wrapper that augments observations by pixel values."""
2
3 import collections
4 import copy
5
6 import numpy as np
7
8 from gym import spaces
9 from gym import ObservationWrapper
10
11 STATE_KEY = 'state'
12
13
14 class PixelObservationWrapper(ObservationWrapper):
15 """Augment observations by pixel values."""
16
17 def __init__(self,
18 env,
19 pixels_only=True,
20 render_kwargs=None,
21 pixel_keys=('pixels', )):
22 """Initializes a new pixel Wrapper.
23
24 Args:
25 env: The environment to wrap.
26 pixels_only: If `True` (default), the original observation returned
27 by the wrapped environment will be discarded, and a dictionary
28 observation will only include pixels. If `False`, the
29 observation dictionary will contain both the original
30 observations and the pixel observations.
31 render_kwargs: Optional `dict` containing keyword arguments passed
32 to the `self.render` method.
33 pixel_keys: Optional custom string specifying the pixel
34 observation's key in the `OrderedDict` of observations.
35 Defaults to 'pixels'.
36
37 Raises:
38 ValueError: If `env`'s observation spec is not compatible with the
39 wrapper. Supported formats are a single array, or a dict of
40 arrays.
41 ValueError: If `env`'s observation already contains any of the
42 specified `pixel_keys`.
43 """
44
45 super(PixelObservationWrapper, self).__init__(env)
46
47 if render_kwargs is None:
48 render_kwargs = {}
49
50 for key in pixel_keys:
51 render_kwargs.setdefault(key, {})
52
53 render_mode = render_kwargs[key].pop('mode', 'rgb_array')
54 assert render_mode == 'rgb_array', render_mode
55 render_kwargs[key]['mode'] = 'rgb_array'
56
57 wrapped_observation_space = env.observation_space
58
59 if isinstance(wrapped_observation_space, spaces.Box):
60 self._observation_is_dict = False
61 invalid_keys = set([STATE_KEY])
62 elif isinstance(wrapped_observation_space,
63 (spaces.Dict, collections.MutableMapping)):
64 self._observation_is_dict = True
65 invalid_keys = set(wrapped_observation_space.spaces.keys())
66 else:
67 raise ValueError("Unsupported observation space structure.")
68
69 if not pixels_only:
70 # Make sure that now keys in the `pixel_keys` overlap with
71 # `observation_keys`
72 overlapping_keys = set(pixel_keys) & set(invalid_keys)
73 if overlapping_keys:
74 raise ValueError("Duplicate or reserved pixel keys {!r}."
75 .format(overlapping_keys))
76
77 if pixels_only:
78 self.observation_space = spaces.Dict()
79 elif self._observation_is_dict:
80 self.observation_space = copy.deepcopy(wrapped_observation_space)
81 else:
82 self.observation_space = spaces.Dict()
83 self.observation_space.spaces[STATE_KEY] = wrapped_observation_space
84
85 # Extend observation space with pixels.
86
87 pixels_spaces = {}
88 for pixel_key in pixel_keys:
89 pixels = self.env.render(**render_kwargs[pixel_key])
90
91 if np.issubdtype(pixels.dtype, np.integer):
92 low, high = (0, 255)
93 elif np.issubdtype(pixels.dtype, np.float):
94 low, high = (-float('inf'), float('inf'))
95 else:
96 raise TypeError(pixels.dtype)
97
98 pixels_space = spaces.Box(
99 shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)
100 pixels_spaces[pixel_key] = pixels_space
101
102 self.observation_space.spaces.update(pixels_spaces)
103
104 self._env = env
105 self._pixels_only = pixels_only
106 self._render_kwargs = render_kwargs
107 self._pixel_keys = pixel_keys
108
109 def observation(self, observation):
110 pixel_observation = self._add_pixel_observation(observation)
111 return pixel_observation
112
113 def _add_pixel_observation(self, observation):
114 if self._pixels_only:
115 observation = collections.OrderedDict()
116 elif self._observation_is_dict:
117 observation = type(observation)(observation)
118 else:
119 observation = collections.OrderedDict()
120 observation[STATE_KEY] = observation
121
122 pixel_observations = {
123 pixel_key: self.env.render(**self._render_kwargs[pixel_key])
124 for pixel_key in self._pixel_keys
125 }
126
127 observation.update(pixel_observations)
128
129 return observation
130
[end of gym/wrappers/pixel_observation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py
--- a/gym/wrappers/pixel_observation.py
+++ b/gym/wrappers/pixel_observation.py
@@ -110,14 +110,14 @@
pixel_observation = self._add_pixel_observation(observation)
return pixel_observation
- def _add_pixel_observation(self, observation):
+ def _add_pixel_observation(self, wrapped_observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
- observation = type(observation)(observation)
+ observation = type(wrapped_observation)(wrapped_observation)
else:
observation = collections.OrderedDict()
- observation[STATE_KEY] = observation
+ observation[STATE_KEY] = wrapped_observation
pixel_observations = {
pixel_key: self.env.render(**self._render_kwargs[pixel_key])
| {"golden_diff": "diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py\n--- a/gym/wrappers/pixel_observation.py\n+++ b/gym/wrappers/pixel_observation.py\n@@ -110,14 +110,14 @@\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n \n- def _add_pixel_observation(self, observation):\n+ def _add_pixel_observation(self, wrapped_observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n- observation = type(observation)(observation)\n+ observation = type(wrapped_observation)(wrapped_observation)\n else:\n observation = collections.OrderedDict()\n- observation[STATE_KEY] = observation\n+ observation[STATE_KEY] = wrapped_observation\n \n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n", "issue": "Bug in PixelObservationWrapper\nIn the pixel_observation.py, we have a bit of code that looks like this: \r\n\r\n```\r\n def _add_pixel_observation(self, observation):\r\n if self._pixels_only:\r\n observation = collections.OrderedDict()\r\n elif self._observation_is_dict:\r\n observation = type(observation)(observation)\r\n else:\r\n observation = collections.OrderedDict()\r\n observation[STATE_KEY] = observation\r\n```\r\n\r\nIf you note, the argument `observation` is being clobbered in the else case, so now the observation dictionary at the STATE_KEY refers to itself instead of the underlying env's observation. \r\n\r\nI'm happy to fix this and submit a pull request but I wanted to raise the community's attention to this first. \n", "before_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. 
Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}]} | 1,915 | 217 |
gh_patches_debug_51330 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: io.imshow() and io.show() do not work as expected in winows
In my win7-x64 environemnt, io.imshow() and io.show() do not work as expected. I use io.imshow() to show mutiple images, and when I call io.show() to show all the images, only the last image shows. In linux, it works well and all the images will show when I call io.show()
</issue>
<code>
[start of skimage/io/_plugins/matplotlib_plugin.py]
1 import matplotlib.pyplot as plt
2
3
4 def imshow(*args, **kwargs):
5 kwargs.setdefault('interpolation', 'nearest')
6 kwargs.setdefault('cmap', 'gray')
7 plt.imshow(*args, **kwargs)
8
9 imread = plt.imread
10 show = plt.show
11
12
13 def _app_show():
14 show()
15
[end of skimage/io/_plugins/matplotlib_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py
--- a/skimage/io/_plugins/matplotlib_plugin.py
+++ b/skimage/io/_plugins/matplotlib_plugin.py
@@ -2,6 +2,8 @@
def imshow(*args, **kwargs):
+ if plt.gca().has_data():
+ plt.figure()
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
plt.imshow(*args, **kwargs)
| {"golden_diff": "diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py\n--- a/skimage/io/_plugins/matplotlib_plugin.py\n+++ b/skimage/io/_plugins/matplotlib_plugin.py\n@@ -2,6 +2,8 @@\n \n \n def imshow(*args, **kwargs):\n+ if plt.gca().has_data():\n+ plt.figure()\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n", "issue": "Bug: io.imshow() and io.show() do not work as expected in winows\nIn my win7-x64 environemnt, io.imshow() and io.show() do not work as expected. I use io.imshow() to show mutiple images, and when I call io.show() to show all the images, only the last image shows. In linux, it works well and all the images will show when I call io.show()\n\n", "before_files": [{"content": "import matplotlib.pyplot as plt\n\n\ndef imshow(*args, **kwargs):\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n\nimread = plt.imread\nshow = plt.show\n\n\ndef _app_show():\n show()\n", "path": "skimage/io/_plugins/matplotlib_plugin.py"}]} | 720 | 115 |
gh_patches_debug_64317 | rasdani/github-patches | git_diff | pex-tool__pex-1112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.21
On the docket:
+ [x] "FileNotFoundError: [Errno 2] No such file or directory" in pex #1098
+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101
+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100
+ [x] Invalid requirement, parse error at "'python_v' #940
+ [x] Pex skipping pandas activation #1017
+ [x] Changing vendored versions does not fully clean up previous version #1096
+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.20"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.20"
+__version__ = "2.1.21"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.20\"\n+__version__ = \"2.1.21\"\n", "issue": "Release 2.1.21\nOn the docket:\r\n+ [x] \"FileNotFoundError: [Errno 2] No such file or directory\" in pex #1098\r\n+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101\r\n+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100\r\n+ [x] Invalid requirement, parse error at \"'python_v' #940\r\n+ [x] Pex skipping pandas activation #1017\r\n+ [x] Changing vendored versions does not fully clean up previous version #1096\r\n+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.20\"\n", "path": "pex/version.py"}]} | 756 | 97 |
gh_patches_debug_66169 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show simplified domain statuses to the registrant
### Story
As a domain manager
I want to know the status of my domain in simple language
so that I know if any action is needed or if any functions are limited
### Acceptance Criteria
- [x] Domains table on the dashboard shows a user-friendly domain status
- [ ] Show the domain status on the "Domain Overview" page
- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)
### Additional Context
**BACKGROUND**
In general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing "user-friendly" versions of the domain status.
User-friendly statuses include:
- _Note:_ "Unknown" _Domain status shows as_ "DNS needed"
- DNS needed
- Ready
- On hold
- Deleted
Refer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)
**DOMAINS TABLE**
Currently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)
**DOMAIN OVERVIEW**
Currently, we do not show the domain status when viewing the "Manage Domains" pages. The "Manage Domains" pages can be accessed by clicking the "Manage" button next to an approved domain.
The first page is the "Domain Overview." Add stylized message to the top of that page to indicate the user-friendly domain status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)
### Issue Links
Depends on: Domain Status presence for testing
</issue>
<code>
[start of src/registrar/views/index.py]
1 from django.db.models import F
2 from django.shortcuts import render
3
4 from registrar.models import DomainApplication
5
6
7 def index(request):
8 """This page is available to anyone without logging in."""
9 context = {}
10 if request.user.is_authenticated:
11 applications = DomainApplication.objects.filter(creator=request.user)
12 # Let's exclude the approved applications since our
13 # domain_applications context will be used to populate
14 # the active applications table
15 context["domain_applications"] = applications.exclude(status="approved")
16
17 domains = request.user.permissions.values(
18 "role",
19 pk=F("domain__id"),
20 name=F("domain__name"),
21 created_time=F("domain__created_at"),
22 application_status=F("domain__domain_application__status"),
23 )
24 context["domains"] = domains
25 return render(request, "home.html", context)
26
[end of src/registrar/views/index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py
--- a/src/registrar/views/index.py
+++ b/src/registrar/views/index.py
@@ -19,7 +19,7 @@
pk=F("domain__id"),
name=F("domain__name"),
created_time=F("domain__created_at"),
- application_status=F("domain__domain_application__status"),
+ state=F("domain__state"),
)
context["domains"] = domains
return render(request, "home.html", context)
| {"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -19,7 +19,7 @@\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n- application_status=F(\"domain__domain_application__status\"),\n+ state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "issue": "Show simplified domain statuses to the registrant\n### Story\r\n\r\nAs a domain manager\r\nI want to know the status of my domain in simple language\r\nso that I know if any action is needed or if any functions are limited\r\n\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] Domains table on the dashboard shows a user-friendly domain status\r\n- [ ] Show the domain status on the \"Domain Overview\" page\r\n- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)\r\n\r\n\r\n### Additional Context\r\n**BACKGROUND**\r\nIn general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing \"user-friendly\" versions of the domain status. \r\n\r\nUser-friendly statuses include: \r\n- _Note:_ \"Unknown\" _Domain status shows as_ \"DNS needed\"\r\n- DNS needed \r\n- Ready\r\n- On hold\r\n- Deleted\r\n\r\nRefer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)\r\n\r\n**DOMAINS TABLE**\r\nCurrently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status. \r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n**DOMAIN OVERVIEW**\r\nCurrently, we do not show the domain status when viewing the \"Manage Domains\" pages. The \"Manage Domains\" pages can be accessed by clicking the \"Manage\" button next to an approved domain. 
\r\n\r\nThe first page is the \"Domain Overview.\" Add stylized message to the top of that page to indicate the user-friendly domain status.\r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n### Issue Links\r\n\r\nDepends on: Domain Status presence for testing\n", "before_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]} | 1,351 | 118 |
gh_patches_debug_20140 | rasdani/github-patches | git_diff | Flexget__Flexget-1599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bakabt URL change
Bakabt url has change from http://www.bakabt.com to https://bakabt.me, the url rewriter plugin needs to be updated to match
</issue>
<code>
[start of flexget/plugins/sites/bakabt.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from flexget import plugin
7 from flexget.event import event
8 from flexget.plugins.internal.urlrewriting import UrlRewritingError
9 from flexget.utils.soup import get_soup
10
11 log = logging.getLogger('bakabt')
12
13
14 class UrlRewriteBakaBT(object):
15 """BakaBT urlrewriter."""
16
17 # urlrewriter API
18 def url_rewritable(self, task, entry):
19 url = entry['url']
20 if url.startswith('http://www.bakabt.com/download/'):
21 return False
22 if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
23 return True
24 return False
25
26 # urlrewriter API
27 def url_rewrite(self, task, entry):
28 entry['url'] = self.parse_download_page(entry['url'], task.requests)
29
30 @plugin.internet(log)
31 def parse_download_page(self, url, requests):
32 txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
33 page = requests.get(url, headers=txheaders)
34 try:
35 soup = get_soup(page.text)
36 except Exception as e:
37 raise UrlRewritingError(e)
38 tag_a = soup.find('a', attrs={'class': 'download_link'})
39 if not tag_a:
40 raise UrlRewritingError('Unable to locate download link from url %s' % url)
41 torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
42 return torrent_url
43
44
45 @event('plugin.register')
46 def register_plugin():
47 plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)
48
[end of flexget/plugins/sites/bakabt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py
--- a/flexget/plugins/sites/bakabt.py
+++ b/flexget/plugins/sites/bakabt.py
@@ -17,9 +17,9 @@
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
- if url.startswith('http://www.bakabt.com/download/'):
+ if url.startswith('http://www.bakabt.me/download/'):
return False
- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):
return True
return False
@@ -38,7 +38,7 @@
tag_a = soup.find('a', attrs={'class': 'download_link'})
if not tag_a:
raise UrlRewritingError('Unable to locate download link from url %s' % url)
- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')
return torrent_url
| {"golden_diff": "diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py\n--- a/flexget/plugins/sites/bakabt.py\n+++ b/flexget/plugins/sites/bakabt.py\n@@ -17,9 +17,9 @@\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n- if url.startswith('http://www.bakabt.com/download/'):\n+ if url.startswith('http://www.bakabt.me/download/'):\n return False\n- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):\n return True\n return False\n \n@@ -38,7 +38,7 @@\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')\n return torrent_url\n", "issue": "Bakabt URL change\nBakabt url has change from http://www.bakabt.com to https://bakabt.me, the url rewriter plugin needs to be updated to match\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('bakabt')\n\n\nclass UrlRewriteBakaBT(object):\n \"\"\"BakaBT urlrewriter.\"\"\"\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith('http://www.bakabt.com/download/'):\n return False\n if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task.requests)\n\n @plugin.internet(log)\n def parse_download_page(self, url, requests):\n txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n page = requests.get(url, headers=txheaders)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n return torrent_url\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/bakabt.py"}]} | 1,086 | 295 |
gh_patches_debug_23767 | rasdani/github-patches | git_diff | microsoft__presidio-650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Image anonymization is slow
Hi,
First of all, thank you guys for making this awesome project :)
I wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a python package and it takes about 5~ seconds to process pretty small images. I'm using the example code from the Presidio docs.
</issue>
<code>
[start of presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py]
1 from typing import List
2
3 from presidio_analyzer import AnalyzerEngine
4 from presidio_analyzer import RecognizerResult
5 from presidio_image_redactor.entities.image_recognizer_result import (
6 ImageRecognizerResult,
7 )
8 from presidio_image_redactor.ocr import OCR
9
10
11 class ImageAnalyzerEngine:
12 """ImageAnalyzerEngine class."""
13
14 def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
15 """Analyse method to analyse the given image.
16
17 :param image: PIL Image/numpy array or file path(str) to be processed
18
19 :return: list of the extract entities with image bounding boxes
20 """
21 ocr_result = OCR().perform_ocr(image)
22 text = OCR().get_text_from_ocr_dict(ocr_result)
23
24 analyzer = AnalyzerEngine()
25 analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
26 bboxes = self.map_analyzer_results_to_bounding_boxes(
27 analyzer_result, ocr_result, text
28 )
29 return bboxes
30
31 @staticmethod
32 def map_analyzer_results_to_bounding_boxes(
33 text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str
34 ) -> List[ImageRecognizerResult]:
35 """Map extracted PII entities to image bounding boxes.
36
37 Matching is based on the position of the recognized entity from analyzer
38 and word (in ocr dict) in the text.
39
40 :param text_analyzer_results: PII entities recognized by presidio analyzer
41 :param ocr_result: dict results with words and bboxes from OCR
42 :param text: text the results are based on
43
44 return: list of extracted entities with image bounding boxes
45 """
46 if (not ocr_result) or (not text_analyzer_results):
47 return []
48
49 bboxes = []
50 proc_indexes = 0
51 indexes = len(text_analyzer_results)
52
53 pos = 0
54 iter_ocr = enumerate(ocr_result["text"])
55 for index, word in iter_ocr:
56 if not word:
57 pos += 1
58 else:
59 for element in text_analyzer_results:
60 text_element = text[element.start : element.end]
61 # check position and text of ocr word matches recognized entity
62 if (
63 max(pos, element.start) < min(element.end, pos + len(word))
64 ) and ((text_element in word) or (word in text_element)):
65 bboxes.append(
66 ImageRecognizerResult(
67 element.entity_type,
68 element.start,
69 element.end,
70 element.score,
71 ocr_result["left"][index],
72 ocr_result["top"][index],
73 ocr_result["width"][index],
74 ocr_result["height"][index],
75 )
76 )
77
78 # add bounding boxes for all words in ocr dict
79 # contained within the text of recognized entity
80 # based on relative position in the full text
81 while pos + len(word) < element.end:
82 index, word = next(iter_ocr)
83 if word:
84 bboxes.append(
85 ImageRecognizerResult(
86 element.entity_type,
87 element.start,
88 element.end,
89 element.score,
90 ocr_result["left"][index],
91 ocr_result["top"][index],
92 ocr_result["width"][index],
93 ocr_result["height"][index],
94 )
95 )
96 pos += len(word) + 1
97 proc_indexes += 1
98
99 if proc_indexes == indexes:
100 break
101 pos += len(word) + 1
102
103 return bboxes
104
[end of presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
@@ -9,7 +9,16 @@
class ImageAnalyzerEngine:
- """ImageAnalyzerEngine class."""
+ """ImageAnalyzerEngine class.
+
+ :param analyzer_engine: The Presidio AnalyzerEngine instance
+ to be used to detect PII in text.
+ """
+
+ def __init__(self, analyzer_engine: AnalyzerEngine = None):
+ if not analyzer_engine:
+ analyzer_engine = AnalyzerEngine()
+ self.analyzer_engine = analyzer_engine
def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
"""Analyse method to analyse the given image.
@@ -21,8 +30,8 @@
ocr_result = OCR().perform_ocr(image)
text = OCR().get_text_from_ocr_dict(ocr_result)
- analyzer = AnalyzerEngine()
- analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
+ analyzer_result = self.analyzer_engine.analyze(
+ text=text, language="en", **kwargs)
bboxes = self.map_analyzer_results_to_bounding_boxes(
analyzer_result, ocr_result, text
)
| {"golden_diff": "diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n@@ -9,7 +9,16 @@\n \n \n class ImageAnalyzerEngine:\n- \"\"\"ImageAnalyzerEngine class.\"\"\"\n+ \"\"\"ImageAnalyzerEngine class.\n+\n+ :param analyzer_engine: The Presidio AnalyzerEngine instance\n+ to be used to detect PII in text.\n+ \"\"\"\n+\n+ def __init__(self, analyzer_engine: AnalyzerEngine = None):\n+ if not analyzer_engine:\n+ analyzer_engine = AnalyzerEngine()\n+ self.analyzer_engine = analyzer_engine\n \n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n@@ -21,8 +30,8 @@\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n \n- analyzer = AnalyzerEngine()\n- analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n+ analyzer_result = self.analyzer_engine.analyze(\n+ text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n", "issue": "Image anonymization is slow\nHi,\r\n\r\nFirst of all, thank you guys for making this awesome project :)\r\n\r\nI wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a python package and it takes about 5~ seconds to process pretty small images. I'm using the example code from the Presidio docs.\n", "before_files": [{"content": "from typing import List\n\nfrom presidio_analyzer import AnalyzerEngine\nfrom presidio_analyzer import RecognizerResult\nfrom presidio_image_redactor.entities.image_recognizer_result import (\n ImageRecognizerResult,\n)\nfrom presidio_image_redactor.ocr import OCR\n\n\nclass ImageAnalyzerEngine:\n \"\"\"ImageAnalyzerEngine class.\"\"\"\n\n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n\n :param image: PIL Image/numpy array or file path(str) to be processed\n\n :return: list of the extract entities with image bounding boxes\n \"\"\"\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n\n analyzer = AnalyzerEngine()\n analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n return bboxes\n\n @staticmethod\n def map_analyzer_results_to_bounding_boxes(\n text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str\n ) -> List[ImageRecognizerResult]:\n \"\"\"Map extracted PII entities to image bounding boxes.\n\n Matching is based on the position of the recognized entity from analyzer\n and word (in ocr dict) in the text.\n\n :param text_analyzer_results: PII entities recognized by presidio analyzer\n :param ocr_result: dict results with words and bboxes from OCR\n :param text: text the results are based on\n\n return: list of extracted entities with image bounding boxes\n \"\"\"\n if (not ocr_result) or (not text_analyzer_results):\n return []\n\n bboxes = []\n proc_indexes = 0\n indexes = len(text_analyzer_results)\n\n pos = 0\n iter_ocr = enumerate(ocr_result[\"text\"])\n for index, word in iter_ocr:\n if not word:\n pos += 1\n else:\n for element in text_analyzer_results:\n text_element = 
text[element.start : element.end]\n # check position and text of ocr word matches recognized entity\n if (\n max(pos, element.start) < min(element.end, pos + len(word))\n ) and ((text_element in word) or (word in text_element)):\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n\n # add bounding boxes for all words in ocr dict\n # contained within the text of recognized entity\n # based on relative position in the full text\n while pos + len(word) < element.end:\n index, word = next(iter_ocr)\n if word:\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n pos += len(word) + 1\n proc_indexes += 1\n\n if proc_indexes == indexes:\n break\n pos += len(word) + 1\n\n return bboxes\n", "path": "presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py"}]} | 1,606 | 340 |
gh_patches_debug_15494 | rasdani/github-patches | git_diff | EleutherAI__gpt-neox-72 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement 1-Bit Adam
Integrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)
</issue>
<code>
[start of train_pipeline.py]
1 import random
2 import deepspeed
3 import torch
4 from torch.utils.data import DataLoader
5 from tqdm.auto import trange
6 import torch.distributed as distributed
7
8 from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
9 prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
10
11 from gpt_neox.utils import get_args, get_params
12
13 import GPUtil
14
15 # arguments
16 train_args = get_args()
17 params = get_params(train_args.model)
18
19 # tokenizer
20 tokenizer = get_tokenizer(tokenizer_type=params["tokenizer"].get("type", None),
21 from_pretrained=params["tokenizer"].get("from_pretrained", True),
22 add_padding_token=params["tokenizer"].get("add_padding_token", False))
23 vocab_size = len(tokenizer) if params["vocab_size"] is None else params["vocab_size"]
24
25 # model
26 deepspeed.init_distributed(dist_backend='nccl')
27 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
28
29 def loss_function(x, y):
30 losses = torch.nn.functional.cross_entropy(x, y, reduction='none')
31 loss = losses.mean()
32 return loss
33
34 model = GPTNeoX_Pipe(
35 num_tokens=params["vocab_size"],
36 dim=params["hidden_dim"],
37 seq_len=params["seq_len"],
38 depth=params["n_layers"],
39 heads=params["n_heads"],
40 dim_head=params["dim_head"],
41 loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),
42 num_stages = params.get("pipeline_num_stages", 2)
43 )
44 model = AutoregressiveWrapper(model)
45
46 # optimizer
47 ds_model_params = prepare_optimizer_parameters(model)
48 optim = torch.optim.Adam(model.parameters(), lr=params["learning_rate"])
49
50 # prepare data
51 dset_params = params["dataset"]
52 assert dset_params is not None
53
54 if is_main(train_args):
55 prepare_data(dset_params["name"])
56 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
57 else:
58 torch.distributed.barrier()
59
60 # data loading
61 train_dataset = GPT2Dataset(glob_pattern=dset_params["train_path"],
62 seq_len=params["seq_len"],
63 train=True,
64 **dset_params)
65 train_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get("pin_memory", False))
66
67 eval_dataset = GPT2Dataset(glob_pattern=dset_params["eval_path"],
68 seq_len=params["seq_len"],
69 train=False,
70 **dset_params)
71
72 val_loader = DataLoader(eval_dataset, batch_size=params["eval_batch_size"])
73 val_loader = iter(val_loader)
74
75 # deepspeed loader
76 model_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,
77 model=model,
78 optimizer=optim,
79 model_parameters=ds_model_params,
80 training_data=train_dataset)
81
82
83 batches_to_train = 10000
84
85 pbar = trange(params["num_epochs"], mininterval=10., desc='Training Model', dynamic_ncols=True)
86 for _ in pbar:
87 for i in range(batches_to_train):
88
89 is_main = model_engine.local_rank == 0
90
91 loss = model_engine.train_batch()
92
93 pbar.set_description(f'Training Loss: {loss.item():.4f}')
94 pbar.update()
95
[end of train_pipeline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/train_pipeline.py b/train_pipeline.py
--- a/train_pipeline.py
+++ b/train_pipeline.py
@@ -1,16 +1,21 @@
+import argparse
+import json
import random
+from collections import defaultdict
+import os
import deepspeed
import torch
from torch.utils.data import DataLoader
from tqdm.auto import trange
-import torch.distributed as distributed
-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,
+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,
+ GPTNeoX_Pipe)
+from gpt_neox.datasets import GPT2Dataset
+from gpt_neox.utils import is_main
+import gpt_neox
-from gpt_neox.utils import get_args, get_params
-
-import GPUtil
+WORLD_SIZE = os.getenv('WORLD_SIZE')
# arguments
train_args = get_args()
| {"golden_diff": "diff --git a/train_pipeline.py b/train_pipeline.py\n--- a/train_pipeline.py\n+++ b/train_pipeline.py\n@@ -1,16 +1,21 @@\n+import argparse\n+import json\n import random\n+from collections import defaultdict\n+import os\n import deepspeed\n import torch\n from torch.utils.data import DataLoader\n from tqdm.auto import trange\n-import torch.distributed as distributed\n \n-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,\n+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,\n+ GPTNeoX_Pipe)\n+from gpt_neox.datasets import GPT2Dataset\n+from gpt_neox.utils import is_main\n+import gpt_neox\n \n-from gpt_neox.utils import get_args, get_params\n-\n-import GPUtil\n+WORLD_SIZE = os.getenv('WORLD_SIZE')\n \n # arguments\n train_args = get_args()\n", "issue": "Implement 1-Bit Adam\nIntegrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)\n", "before_files": [{"content": "import random\nimport deepspeed\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import trange\nimport torch.distributed as distributed\n\nfrom gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n\nfrom gpt_neox.utils import get_args, get_params\n\nimport GPUtil\n\n# arguments\ntrain_args = get_args()\nparams = get_params(train_args.model)\n\n# tokenizer\ntokenizer = get_tokenizer(tokenizer_type=params[\"tokenizer\"].get(\"type\", None),\n from_pretrained=params[\"tokenizer\"].get(\"from_pretrained\", True),\n add_padding_token=params[\"tokenizer\"].get(\"add_padding_token\", False))\nvocab_size = len(tokenizer) if params[\"vocab_size\"] is None else params[\"vocab_size\"]\n\n# model\ndeepspeed.init_distributed(dist_backend='nccl')\ntorch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\n\ndef loss_function(x, y):\n losses = torch.nn.functional.cross_entropy(x, y, reduction='none')\n loss = losses.mean()\n return loss\n \nmodel = GPTNeoX_Pipe(\n num_tokens=params[\"vocab_size\"],\n dim=params[\"hidden_dim\"],\n seq_len=params[\"seq_len\"],\n depth=params[\"n_layers\"],\n heads=params[\"n_heads\"],\n dim_head=params[\"dim_head\"],\n loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),\n num_stages = params.get(\"pipeline_num_stages\", 2)\n)\nmodel = AutoregressiveWrapper(model)\n\n# optimizer\nds_model_params = prepare_optimizer_parameters(model)\noptim = torch.optim.Adam(model.parameters(), lr=params[\"learning_rate\"])\n\n# prepare data\ndset_params = params[\"dataset\"]\nassert dset_params is not None\n\nif is_main(train_args):\n prepare_data(dset_params[\"name\"])\n torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\nelse:\n torch.distributed.barrier()\n \n# data loading\ntrain_dataset = GPT2Dataset(glob_pattern=dset_params[\"train_path\"],\n seq_len=params[\"seq_len\"],\n train=True,\n **dset_params)\ntrain_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get(\"pin_memory\", False))\n\neval_dataset = GPT2Dataset(glob_pattern=dset_params[\"eval_path\"],\n seq_len=params[\"seq_len\"],\n train=False,\n **dset_params)\n\nval_loader = DataLoader(eval_dataset, 
batch_size=params[\"eval_batch_size\"])\nval_loader = iter(val_loader)\n\n# deepspeed loader\nmodel_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,\n model=model,\n optimizer=optim,\n model_parameters=ds_model_params,\n training_data=train_dataset)\n\n\nbatches_to_train = 10000\n\npbar = trange(params[\"num_epochs\"], mininterval=10., desc='Training Model', dynamic_ncols=True)\nfor _ in pbar:\n for i in range(batches_to_train):\n\n is_main = model_engine.local_rank == 0\n\n loss = model_engine.train_batch()\n\n pbar.set_description(f'Training Loss: {loss.item():.4f}')\n pbar.update()\n", "path": "train_pipeline.py"}]} | 1,477 | 245 |
gh_patches_debug_33275 | rasdani/github-patches | git_diff | liberapay__liberapay.com-82 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch Jinja templates to line statements
Because they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. [Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).
Any objections?
</issue>
<code>
[start of liberapay/utils/emails.py]
1 from __future__ import unicode_literals
2
3 from aspen.simplates.pagination import parse_specline, split_and_escape
4 from aspen_jinja2_renderer import SimplateLoader
5 from jinja2 import Environment
6
7
8 ( VERIFICATION_MISSING
9 , VERIFICATION_FAILED
10 , VERIFICATION_EXPIRED
11 , VERIFICATION_REDUNDANT
12 , VERIFICATION_STYMIED
13 , VERIFICATION_SUCCEEDED
14 ) = range(6)
15
16
17 jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
18 jinja_env_html = Environment(
19 trim_blocks=True, lstrip_blocks=True,
20 autoescape=True, extensions=['jinja2.ext.autoescape'],
21 )
22
23 def compile_email_spt(fpath):
24 r = {}
25 with open(fpath) as f:
26 pages = list(split_and_escape(f.read()))
27 for i, page in enumerate(pages, 1):
28 tmpl = b'\n' * page.offset + page.content
29 content_type, renderer = parse_specline(page.header)
30 key = 'subject' if i == 1 else content_type
31 env = jinja_env_html if content_type == 'text/html' else jinja_env
32 r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)
33 return r
34
[end of liberapay/utils/emails.py]
[start of liberapay/constants.py]
1 from __future__ import print_function, unicode_literals
2
3 from collections import OrderedDict
4 from datetime import date, timedelta
5 from decimal import Decimal
6 import re
7
8
9 ASCII_ALLOWED_IN_USERNAME = set("0123456789"
10 "abcdefghijklmnopqrstuvwxyz"
11 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
12 "-_")
13
14
15 BIRTHDAY = date(2015, 5, 22)
16
17 EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
18 EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
19
20 MAX_TIP = Decimal('100.00')
21 MIN_TIP = Decimal('0.01')
22
23 QUARANTINE = timedelta(weeks=4)
24
25 PASSWORD_MIN_SIZE = 8
26 PASSWORD_MAX_SIZE = 150
27
28 _ = lambda a: a
29 PRIVACY_FIELDS = OrderedDict([
30 ('hide_giving', _("Hide total giving from others.")),
31 ('hide_receiving', _("Hide total receiving from others.")),
32 ('hide_from_search', _("Hide myself from search results.")),
33 ])
34 del _
35 PRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())
36
37 SESSION = b'session'
38 SESSION_REFRESH = timedelta(hours=1)
39 SESSION_TIMEOUT = timedelta(hours=6)
40
41 USERNAME_MAX_SIZE = 32
42
[end of liberapay/constants.py]
[start of liberapay/renderers/jinja2_htmlescaped.py]
1 import aspen_jinja2_renderer as base
2
3 from markupsafe import escape as htmlescape
4
5
6 class HTMLRenderer(base.Renderer):
7 def render_content(self, context):
8
9 # Extend to inject an HTML-escaping function. Since autoescape is on,
10 # template authors shouldn't normally need to use this function, but
11 # having it in the simplate context makes it easier to implement i18n.
12
13 context['escape'] = context['state']['escape'] = htmlescape
14
15 return base.Renderer.render_content(self, context)
16
17
18 class Factory(base.Factory):
19
20 Renderer = HTMLRenderer
21
22 def compile_meta(self, configuration):
23 # Override to turn on autoescaping.
24 loader = base.FileSystemLoader(configuration.project_root)
25 return base.Environment( loader=loader
26 , autoescape=True
27 , extensions=['jinja2.ext.autoescape']
28 )
29
[end of liberapay/renderers/jinja2_htmlescaped.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/constants.py b/liberapay/constants.py
--- a/liberapay/constants.py
+++ b/liberapay/constants.py
@@ -17,6 +17,11 @@
EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
+JINJA_ENV_COMMON = dict(
+ trim_blocks=True, lstrip_blocks=True,
+ line_statement_prefix='%', line_comment_prefix='#',
+)
+
MAX_TIP = Decimal('100.00')
MIN_TIP = Decimal('0.01')
diff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py
--- a/liberapay/renderers/jinja2_htmlescaped.py
+++ b/liberapay/renderers/jinja2_htmlescaped.py
@@ -2,6 +2,8 @@
from markupsafe import escape as htmlescape
+from liberapay.constants import JINJA_ENV_COMMON
+
class HTMLRenderer(base.Renderer):
def render_content(self, context):
@@ -22,7 +24,8 @@
def compile_meta(self, configuration):
# Override to turn on autoescaping.
loader = base.FileSystemLoader(configuration.project_root)
- return base.Environment( loader=loader
- , autoescape=True
- , extensions=['jinja2.ext.autoescape']
- )
+ return base.Environment(
+ loader=loader,
+ autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
+ )
diff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py
--- a/liberapay/utils/emails.py
+++ b/liberapay/utils/emails.py
@@ -4,6 +4,8 @@
from aspen_jinja2_renderer import SimplateLoader
from jinja2 import Environment
+from liberapay.constants import JINJA_ENV_COMMON
+
( VERIFICATION_MISSING
, VERIFICATION_FAILED
@@ -14,10 +16,10 @@
) = range(6)
-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
+jinja_env = Environment(**JINJA_ENV_COMMON)
jinja_env_html = Environment(
- trim_blocks=True, lstrip_blocks=True,
autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
)
def compile_email_spt(fpath):
| {"golden_diff": "diff --git a/liberapay/constants.py b/liberapay/constants.py\n--- a/liberapay/constants.py\n+++ b/liberapay/constants.py\n@@ -17,6 +17,11 @@\n EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\n EMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n \n+JINJA_ENV_COMMON = dict(\n+ trim_blocks=True, lstrip_blocks=True,\n+ line_statement_prefix='%', line_comment_prefix='#',\n+)\n+\n MAX_TIP = Decimal('100.00')\n MIN_TIP = Decimal('0.01')\n \ndiff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py\n--- a/liberapay/renderers/jinja2_htmlescaped.py\n+++ b/liberapay/renderers/jinja2_htmlescaped.py\n@@ -2,6 +2,8 @@\n \n from markupsafe import escape as htmlescape\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n class HTMLRenderer(base.Renderer):\n def render_content(self, context):\n@@ -22,7 +24,8 @@\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n- return base.Environment( loader=loader\n- , autoescape=True\n- , extensions=['jinja2.ext.autoescape']\n- )\n+ return base.Environment(\n+ loader=loader,\n+ autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n+ )\ndiff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py\n--- a/liberapay/utils/emails.py\n+++ b/liberapay/utils/emails.py\n@@ -4,6 +4,8 @@\n from aspen_jinja2_renderer import SimplateLoader\n from jinja2 import Environment\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n ( VERIFICATION_MISSING\n , VERIFICATION_FAILED\n@@ -14,10 +16,10 @@\n ) = range(6)\n \n \n-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\n+jinja_env = Environment(**JINJA_ENV_COMMON)\n jinja_env_html = Environment(\n- trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n )\n \n def compile_email_spt(fpath):\n", "issue": "Switch Jinja templates to line statements\nBecause they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. 
[Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).\n\nAny objections?\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom aspen.simplates.pagination import parse_specline, split_and_escape\nfrom aspen_jinja2_renderer import SimplateLoader\nfrom jinja2 import Environment\n\n\n( VERIFICATION_MISSING\n, VERIFICATION_FAILED\n, VERIFICATION_EXPIRED\n, VERIFICATION_REDUNDANT\n, VERIFICATION_STYMIED\n, VERIFICATION_SUCCEEDED\n ) = range(6)\n\n\njinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\njinja_env_html = Environment(\n trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n)\n\ndef compile_email_spt(fpath):\n r = {}\n with open(fpath) as f:\n pages = list(split_and_escape(f.read()))\n for i, page in enumerate(pages, 1):\n tmpl = b'\\n' * page.offset + page.content\n content_type, renderer = parse_specline(page.header)\n key = 'subject' if i == 1 else content_type\n env = jinja_env_html if content_type == 'text/html' else jinja_env\n r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)\n return r\n", "path": "liberapay/utils/emails.py"}, {"content": "from __future__ import print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import date, timedelta\nfrom decimal import Decimal\nimport re\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\n\nBIRTHDAY = date(2015, 5, 22)\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n\nMAX_TIP = Decimal('100.00')\nMIN_TIP = Decimal('0.01')\n\nQUARANTINE = timedelta(weeks=4)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\n_ = lambda a: a\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results.\")),\n])\ndel _\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nUSERNAME_MAX_SIZE = 32\n", "path": "liberapay/constants.py"}, {"content": "import aspen_jinja2_renderer as base\n\nfrom markupsafe import escape as htmlescape\n\n\nclass HTMLRenderer(base.Renderer):\n def render_content(self, context):\n\n # Extend to inject an HTML-escaping function. Since autoescape is on,\n # template authors shouldn't normally need to use this function, but\n # having it in the simplate context makes it easier to implement i18n.\n\n context['escape'] = context['state']['escape'] = htmlescape\n\n return base.Renderer.render_content(self, context)\n\n\nclass Factory(base.Factory):\n\n Renderer = HTMLRenderer\n\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n return base.Environment( loader=loader\n , autoescape=True\n , extensions=['jinja2.ext.autoescape']\n )\n", "path": "liberapay/renderers/jinja2_htmlescaped.py"}]} | 1,564 | 569 |
gh_patches_debug_2885 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Wrong version number string within docker 1.9.1
### Description
After a successful pull and deploy via Docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1, the version string on the paperless-ngx Web-UI is still 1.9.0.


### Steps to reproduce
1. Pull the new version via docker, docker-compose or portainer via https://ghcr.io/paperless-ngx/paperless-ngx and tag 1.9.1
2. Access the Web-UI.
3. Login
4. Find the version string on the lower left side.
### Webserver logs
_No response_
### Paperless-ngx version
1.9.1
### Host OS
Alpine Linux x86-64
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
_No response_
### Other
_No response_
</issue>
<code>
[start of src/paperless/version.py]
1 from typing import Final
2 from typing import Tuple
3
4 __version__: Final[Tuple[int, int, int]] = (1, 9, 0)
5 # Version string like X.Y.Z
6 __full_version_str__: Final[str] = ".".join(map(str, __version__))
7 # Version string like X.Y
8 __major_minor_version_str__: Final[str] = ".".join(map(str, __version__[:-1]))
9
[end of src/paperless/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless/version.py b/src/paperless/version.py
--- a/src/paperless/version.py
+++ b/src/paperless/version.py
@@ -1,7 +1,7 @@
from typing import Final
from typing import Tuple
-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)
+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)
# Version string like X.Y.Z
__full_version_str__: Final[str] = ".".join(map(str, __version__))
# Version string like X.Y
| {"golden_diff": "diff --git a/src/paperless/version.py b/src/paperless/version.py\n--- a/src/paperless/version.py\n+++ b/src/paperless/version.py\n@@ -1,7 +1,7 @@\n from typing import Final\n from typing import Tuple\n \n-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)\n # Version string like X.Y.Z\n __full_version_str__: Final[str] = \".\".join(map(str, __version__))\n # Version string like X.Y\n", "issue": "[BUG] Wrong version number string within docker 1.9.1\n### Description\n\nAfter a successful pull and deploy via docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1 the version string on the paperless-ngx Web-UI is still 1.9.0.\r\n\r\n\r\n\n\n### Steps to reproduce\n\n1. Pull the new version via docker, docker-compose or portainer via https://ghcr.io/paperless-ngx/paperless-ngx and tag 1.9.1\r\n2. Access the Web-UI.\r\n3. Login\r\n4. Find the version string on the lower left side.\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.9.1\n\n### Host OS\n\nAlpine Linux x86-64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n", "path": "src/paperless/version.py"}]} | 925 | 136 |
gh_patches_debug_34542 | rasdani/github-patches | git_diff | numpy__numpy-13976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAINT: remove numpy/tools/test-installed-numpy.py ?
Looking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used, while `test-installed-numpy.py` still has discussion of nose / Python 2.4, and Matti's proposed change there adds a `-n` option, which does something different from what `-n` does in `runtests.py`.
`runtests.py -n` will test the installed NumPy instead of rebuilding, so this seems like a redundancy / maintenance burden we don't need moving forward.
</issue>
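For context on the overlap described above, the script shown below essentially reduces to a thin wrapper around `numpy.test()`. A rough, illustrative equivalent (an editorial sketch that omits the script's argument parsing and `sys.path` handling, not code from either script) looks like this:

```python
# Sketch only: the core of what tools/test-installed-numpy.py does after
# argument parsing -- numpy.test() already exposes the same options.
import sys

import numpy

result = numpy.test("fast", verbose=1, extra_argv=["--cov-report=xml"])
sys.exit(0 if result else 1)
```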
<code>
[start of tools/test-installed-numpy.py]
1 #!/usr/bin/env python
2 from __future__ import division, absolute_import, print_function
3
4 # A simple script to test the installed version of numpy by calling
5 # 'numpy.test()'. Key features:
6 # -- convenient command-line syntax
7 # -- sets exit status appropriately, useful for automated test environments
8
9 # It would be better to set this up as a module in the numpy namespace, so
10 # that it could be run as:
11 # python -m numpy.run_tests <args>
12 # But, python2.4's -m switch only works with top-level modules, not modules
13 # that are inside packages. So, once we drop 2.4 support, maybe...
14
15 import sys, os
16 # In case we are run from the source directory, we don't want to import numpy
17 # from there, we want to import the installed version:
18 sys.path.pop(0)
19
20 from optparse import OptionParser
21 parser = OptionParser("usage: %prog [options] -- [nosetests options]")
22 parser.add_option("-v", "--verbose",
23 action="count", dest="verbose", default=1,
24 help="increase verbosity")
25 parser.add_option("--doctests",
26 action="store_true", dest="doctests", default=False,
27 help="Run doctests in module")
28 parser.add_option("--coverage",
29 action="store_true", dest="coverage", default=False,
30 help="report coverage of NumPy code (requires 'pytest-cov' module")
31 parser.add_option("-m", "--mode",
32 action="store", dest="mode", default="fast",
33 help="'fast', 'full', or something that could be "
34 "passed to pytest [default: %default]")
35 parser.add_option("-n", "--durations",
36 dest="durations", default=-1,
37 help="show time to run slowest N tests [default: -1]")
38 (options, args) = parser.parse_args()
39
40 import numpy
41
42 # Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
43 # The same flags check is also used in the tests to switch behavior.
44 if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
45 if not numpy.ones((10, 1), order='C').flags.f_contiguous:
46 print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
47 sys.exit(1)
48 elif numpy.ones((10, 1), order='C').flags.f_contiguous:
49 print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
50 sys.exit(1)
51
52 if options.coverage:
53 # Produce code coverage XML report for codecov.io
54 args += ["--cov-report=xml"]
55
56 result = numpy.test(options.mode,
57 verbose=options.verbose,
58 extra_argv=args,
59 doctests=options.doctests,
60 durations=int(options.durations),
61 coverage=options.coverage)
62
63 if result:
64 sys.exit(0)
65 else:
66 sys.exit(1)
67
[end of tools/test-installed-numpy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
deleted file mode 100755
--- a/tools/test-installed-numpy.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-parser.add_option("-n", "--durations",
- dest="durations", default=-1,
- help="show time to run slowest N tests [default: -1]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- durations=int(options.durations),
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
| {"golden_diff": "diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py\ndeleted file mode 100755\n--- a/tools/test-installed-numpy.py\n+++ /dev/null\n@@ -1,66 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-# A simple script to test the installed version of numpy by calling\n-# 'numpy.test()'. Key features:\n-# -- convenient command-line syntax\n-# -- sets exit status appropriately, useful for automated test environments\n-\n-# It would be better to set this up as a module in the numpy namespace, so\n-# that it could be run as:\n-# python -m numpy.run_tests <args>\n-# But, python2.4's -m switch only works with top-level modules, not modules\n-# that are inside packages. So, once we drop 2.4 support, maybe...\n-\n-import sys, os\n-# In case we are run from the source directory, we don't want to import numpy\n-# from there, we want to import the installed version:\n-sys.path.pop(0)\n-\n-from optparse import OptionParser\n-parser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\n-parser.add_option(\"-v\", \"--verbose\",\n- action=\"count\", dest=\"verbose\", default=1,\n- help=\"increase verbosity\")\n-parser.add_option(\"--doctests\",\n- action=\"store_true\", dest=\"doctests\", default=False,\n- help=\"Run doctests in module\")\n-parser.add_option(\"--coverage\",\n- action=\"store_true\", dest=\"coverage\", default=False,\n- help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\n-parser.add_option(\"-m\", \"--mode\",\n- action=\"store\", dest=\"mode\", default=\"fast\",\n- help=\"'fast', 'full', or something that could be \"\n- \"passed to pytest [default: %default]\")\n-parser.add_option(\"-n\", \"--durations\",\n- dest=\"durations\", default=-1,\n- help=\"show time to run slowest N tests [default: -1]\")\n-(options, args) = parser.parse_args()\n-\n-import numpy\n-\n-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n-# The same flags check is also used in the tests to switch behavior.\n-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n- if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n- sys.exit(1)\n-elif numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n- sys.exit(1)\n-\n-if options.coverage:\n- # Produce code coverage XML report for codecov.io\n- args += [\"--cov-report=xml\"]\n-\n-result = numpy.test(options.mode,\n- verbose=options.verbose,\n- extra_argv=args,\n- doctests=options.doctests,\n- durations=int(options.durations),\n- coverage=options.coverage)\n-\n-if result:\n- sys.exit(0)\n-else:\n- sys.exit(1)\n", "issue": "MAINT: remove numpy/tools/test-installed-numpy.py ?\nLooking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. 
Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used while `test-installed-numpy.py` still has discussion of nose / python 2.4, and Matti's proposed change there adds `-n` option which does something different in `runtests.py`.\r\n\r\n`runtests.py -n` will test installed NumPy instead of rebuilding, so seems like redundancy / maintenance burden we don't need moving forward\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\n# A simple script to test the installed version of numpy by calling\n# 'numpy.test()'. Key features:\n# -- convenient command-line syntax\n# -- sets exit status appropriately, useful for automated test environments\n\n# It would be better to set this up as a module in the numpy namespace, so\n# that it could be run as:\n# python -m numpy.run_tests <args>\n# But, python2.4's -m switch only works with top-level modules, not modules\n# that are inside packages. So, once we drop 2.4 support, maybe...\n\nimport sys, os\n# In case we are run from the source directory, we don't want to import numpy\n# from there, we want to import the installed version:\nsys.path.pop(0)\n\nfrom optparse import OptionParser\nparser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\nparser.add_option(\"-v\", \"--verbose\",\n action=\"count\", dest=\"verbose\", default=1,\n help=\"increase verbosity\")\nparser.add_option(\"--doctests\",\n action=\"store_true\", dest=\"doctests\", default=False,\n help=\"Run doctests in module\")\nparser.add_option(\"--coverage\",\n action=\"store_true\", dest=\"coverage\", default=False,\n help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\nparser.add_option(\"-m\", \"--mode\",\n action=\"store\", dest=\"mode\", default=\"fast\",\n help=\"'fast', 'full', or something that could be \"\n \"passed to pytest [default: %default]\")\nparser.add_option(\"-n\", \"--durations\",\n dest=\"durations\", default=-1,\n help=\"show time to run slowest N tests [default: -1]\")\n(options, args) = parser.parse_args()\n\nimport numpy\n\n# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n# The same flags check is also used in the tests to switch behavior.\nif (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n sys.exit(1)\nelif numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n sys.exit(1)\n\nif options.coverage:\n # Produce code coverage XML report for codecov.io\n args += [\"--cov-report=xml\"]\n\nresult = numpy.test(options.mode,\n verbose=options.verbose,\n extra_argv=args,\n doctests=options.doctests,\n durations=int(options.durations),\n coverage=options.coverage)\n\nif result:\n sys.exit(0)\nelse:\n sys.exit(1)\n", "path": "tools/test-installed-numpy.py"}]} | 1,457 | 740 |
gh_patches_debug_35722 | rasdani/github-patches | git_diff | mdn__kuma-7782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[regression] Multi-locale search is not searching in all locales
**Summary**
http://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de
actually searches on `locale == ['de']` and
http://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale == ['fr']`.
Originally from here:
https://github.com/mdn/yari/pull/1473#pullrequestreview-584750752
</issue>
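A likely explanation for the behaviour reported above (an editorial illustration, not code from the repository): `MultiValueDict.__getitem__` returns only the *last* value stored for a key, so the lowercasing branch in `SearchForm.__init__` collapses a multi-valued `locale` down to a single value before the `MultipleChoiceField` ever sees it.

```python
# Sketch only: reproduces the suspected root cause with Django's MultiValueDict.
from django.utils.datastructures import MultiValueDict

data = MultiValueDict({"locale": ["fr", "de"]})
print(data["locale"])          # 'de'  -- __getitem__ yields only the last value
print(data.getlist("locale"))  # ['fr', 'de']

# What the form's lowercasing branch effectively does:
data["locale"] = data["locale"].lower()
print(data.getlist("locale"))  # ['de'] -- the 'fr' entry has been dropped
```

This matches the reported symptom: `?locale=fr&locale=de` ends up searching only `['de']`, and reversing the query-string order searches only `['fr']`.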
<code>
[start of kuma/api/v1/search/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.utils.datastructures import MultiValueDict
4
5
6 class SearchForm(forms.Form):
7 q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
8 locale = forms.MultipleChoiceField(
9 required=False,
10 # The `settings.LANGUAGES` looks like this:
11 # [('en-US', 'English (US)'), ...]
12 # But all locales are stored in lowercase in Elasticsearch, so
13 # force everything to lowercase.
14 choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
15 )
16
17 SORT_CHOICES = ("best", "relevance", "popularity")
18 sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])
19
20 ARCHIVE_CHOICES = ("exclude", "include", "only")
21 archive = forms.ChoiceField(
22 required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]
23 )
24
25 size = forms.IntegerField(required=True, min_value=1, max_value=100)
26 page = forms.IntegerField(required=True, min_value=1, max_value=10)
27
28 def __init__(self, data, **kwargs):
29 initial = kwargs.get("initial", {})
30 # This makes it possible to supply `initial={some dict}` to the form
31 # and have its values become part of the default. Normally, in Django,
32 # the `SomeForm(data, initial={...})` is just used to prepopulate the
33 # HTML generated form widgets.
34 # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
35 data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
36
37 # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
38 # we can't edit it there. So instead, we mutate it here in the `data`
39 if "locale" in data:
40 # Always force it to lowercase, because that's what the ChoiceField
41 # is configured to. And the searches should always be in lower case.
42 # Remember, Django forms will allow this to be a single string
43 # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
44 if isinstance(data["locale"], str):
45 data["locale"] = data["locale"].lower()
46 else:
47 data["locale"] = [x.lower() for x in data["locale"]]
48
49 # If, for keys we have an initial value for, it was passed an empty string,
50 # then swap it for the initial value.
51 # For example `?q=searching&page=` you probably meant to omit it
52 # but "allowing" it to be an empty string makes it convenient for the client.
53 for key, values in data.items():
54 if key in initial and values == "":
55 data[key] = initial[key]
56
57 super().__init__(data, **kwargs)
58
[end of kuma/api/v1/search/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py
--- a/kuma/api/v1/search/forms.py
+++ b/kuma/api/v1/search/forms.py
@@ -3,15 +3,26 @@
from django.utils.datastructures import MultiValueDict
+class MultipleChoiceFieldICase(forms.MultipleChoiceField):
+ """Just like forms.MultipleChoiceField but everything's case insentive.
+
+ For simplicity, this field assumes that each choice is a tuple where
+ the first element is always a string.
+ """
+
+ def valid_value(self, value):
+ return str(value).lower() in [x[0].lower() for x in self.choices]
+
+
class SearchForm(forms.Form):
q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
- locale = forms.MultipleChoiceField(
+ locale = MultipleChoiceFieldICase(
required=False,
# The `settings.LANGUAGES` looks like this:
# [('en-US', 'English (US)'), ...]
# But all locales are stored in lowercase in Elasticsearch, so
# force everything to lowercase.
- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
+ choices=[(code, name) for code, name in settings.LANGUAGES],
)
SORT_CHOICES = ("best", "relevance", "popularity")
@@ -34,18 +45,6 @@
# See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
- # we can't edit it there. So instead, we mutate it here in the `data`
- if "locale" in data:
- # Always force it to lowercase, because that's what the ChoiceField
- # is configured to. And the searches should always be in lower case.
- # Remember, Django forms will allow this to be a single string
- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
- if isinstance(data["locale"], str):
- data["locale"] = data["locale"].lower()
- else:
- data["locale"] = [x.lower() for x in data["locale"]]
-
# If, for keys we have an initial value for, it was passed an empty string,
# then swap it for the initial value.
# For example `?q=searching&page=` you probably meant to omit it
| {"golden_diff": "diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py\n--- a/kuma/api/v1/search/forms.py\n+++ b/kuma/api/v1/search/forms.py\n@@ -3,15 +3,26 @@\n from django.utils.datastructures import MultiValueDict\n \n \n+class MultipleChoiceFieldICase(forms.MultipleChoiceField):\n+ \"\"\"Just like forms.MultipleChoiceField but everything's case insentive.\n+\n+ For simplicity, this field assumes that each choice is a tuple where\n+ the first element is always a string.\n+ \"\"\"\n+\n+ def valid_value(self, value):\n+ return str(value).lower() in [x[0].lower() for x in self.choices]\n+\n+\n class SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n- locale = forms.MultipleChoiceField(\n+ locale = MultipleChoiceFieldICase(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n+ choices=[(code, name) for code, name in settings.LANGUAGES],\n )\n \n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n@@ -34,18 +45,6 @@\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n \n- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n- # we can't edit it there. So instead, we mutate it here in the `data`\n- if \"locale\" in data:\n- # Always force it to lowercase, because that's what the ChoiceField\n- # is configured to. And the searches should always be in lower case.\n- # Remember, Django forms will allow this to be a single string\n- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n- if isinstance(data[\"locale\"], str):\n- data[\"locale\"] = data[\"locale\"].lower()\n- else:\n- data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n-\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n", "issue": "[regression] Multi-locale search is not searching in all locales\n**Summary**\r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de\r\nactually searches on `locale == ['de']` and \r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale== ['fr']`.\r\n\r\nOriginally from here:\r\nhttps://github.com/mdn/yari/pull/1473#pullrequestreview-584750752\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n locale = forms.MultipleChoiceField(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n )\n\n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])\n\n ARCHIVE_CHOICES = (\"exclude\", \"include\", \"only\")\n archive = forms.ChoiceField(\n required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]\n )\n\n size = forms.IntegerField(required=True, min_value=1, max_value=100)\n page = 
forms.IntegerField(required=True, min_value=1, max_value=10)\n\n def __init__(self, data, **kwargs):\n initial = kwargs.get(\"initial\", {})\n # This makes it possible to supply `initial={some dict}` to the form\n # and have its values become part of the default. Normally, in Django,\n # the `SomeForm(data, initial={...})` is just used to prepopulate the\n # HTML generated form widgets.\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n\n # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n # we can't edit it there. So instead, we mutate it here in the `data`\n if \"locale\" in data:\n # Always force it to lowercase, because that's what the ChoiceField\n # is configured to. And the searches should always be in lower case.\n # Remember, Django forms will allow this to be a single string\n # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n if isinstance(data[\"locale\"], str):\n data[\"locale\"] = data[\"locale\"].lower()\n else:\n data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n # but \"allowing\" it to be an empty string makes it convenient for the client.\n for key, values in data.items():\n if key in initial and values == \"\":\n data[key] = initial[key]\n\n super().__init__(data, **kwargs)\n", "path": "kuma/api/v1/search/forms.py"}]} | 1,431 | 604 |
gh_patches_debug_24202 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-57 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix project description on PyPI
PyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.
</issue>
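For reference, the usual pattern that satisfies both fields is a short `description` string plus a `long_description` read from the README. The snippet below is an editorial sketch with placeholder project metadata, not the project's actual setup.py:

```python
# Sketch only: minimal shape of a setup() call with both description fields.
from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="example-project",
    version="0.0.1",
    description="One-line summary shown in PyPI listings.",
    long_description=long_description,
    long_description_content_type="text/markdown",
)
```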
<code>
[start of setup.py]
1 from distutils.core import setup
2
3 setup(
4 name='heat',
5 packages=['heat'],
6 version='0.0.1',
7 description='A framework for high performance data analytics and machine learning.',
8 author='Helmholtz Association',
9 author_email='[email protected]',
10 url='https://github.com/helmholtz-analytics/heat',
11 # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
12 keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
13 classifiers=[],
14 install_requires=[
15 'numpy>=1.13.0',
16 # 'torch>=0.4.0'
17 ],
18 extras_require={
19 'hdf5': ['h5py>=2.8.0']
20 }
21 )
22
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,19 +1,28 @@
from distutils.core import setup
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
setup(
name='heat',
packages=['heat'],
version='0.0.1',
description='A framework for high performance data analytics and machine learning.',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
author='Helmholtz Association',
author_email='[email protected]',
url='https://github.com/helmholtz-analytics/heat',
- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
- classifiers=[],
+ classifiers=[
+ 'Development Status :: 2 - Pre-Alpha',
+ 'Programming Language :: Python :: 3.5',
+ 'License :: OSI Approved :: MIT License',
+ 'Intended Audience :: Science/Research',
+ 'Topic :: Scientific/Engineering'
+ ],
install_requires=[
'numpy>=1.13.0',
- # 'torch>=0.4.0'
],
extras_require={
'hdf5': ['h5py>=2.8.0']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,19 +1,28 @@\n from distutils.core import setup\n \n+with open(\"README.md\", \"r\") as fh:\n+ long_description = fh.read()\n+\n setup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n- classifiers=[],\n+ classifiers=[\n+ 'Development Status :: 2 - Pre-Alpha',\n+ 'Programming Language :: Python :: 3.5',\n+ 'License :: OSI Approved :: MIT License',\n+ 'Intended Audience :: Science/Research',\n+ 'Topic :: Scientific/Engineering'\n+ ],\n install_requires=[\n 'numpy>=1.13.0',\n- # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n", "issue": "Fix project description on PyPI\nPyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n classifiers=[],\n install_requires=[\n 'numpy>=1.13.0',\n # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n }\n)\n", "path": "setup.py"}]} | 792 | 324 |
gh_patches_debug_17616 | rasdani/github-patches | git_diff | python-discord__bot-655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Write unit tests for `bot/rules/mentions.py`
Write unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss it in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent a situation where multiple people are working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
</issue>
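To make the request concrete, a minimal async-rule test could look roughly like the sketch below. This is an editorial illustration written against the `attachments` rule included further down (since that is the file shown here, not `mentions.py`); the `make_msg` helper and the config values are assumptions, not project code.

```python
# Sketch only: one way to unit-test an async antispam rule with mocked messages.
import asyncio
import unittest
from unittest.mock import MagicMock

from bot.rules import attachments


def make_msg(author: str, total_attachments: int) -> MagicMock:
    """Build a mock Message with the given author and attachment count."""
    msg = MagicMock()
    msg.author = author
    msg.attachments = list(range(total_attachments))
    return msg


class AttachmentRuleTests(unittest.TestCase):
    def test_under_limit_returns_none(self):
        last = make_msg("bob", 2)
        result = asyncio.run(attachments.apply(last, [], {"max": 5}))
        self.assertIsNone(result)

    def test_over_limit_is_flagged(self):
        last = make_msg("bob", 6)
        result = asyncio.run(attachments.apply(last, [], {"max": 5}))
        self.assertIsNotNone(result)
```

On Python 3.8+ the same tests could instead subclass `unittest.IsolatedAsyncioTestCase` and `await` the rule directly.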
<code>
[start of bot/rules/attachments.py]
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects total attachments exceeding the limit sent by a single user."""
10 relevant_messages = [last_message] + [
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and len(msg.attachments) > 0
16 )
17 ]
18 total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
19
20 if total_recent_attachments > config['max']:
21 return (
22 f"sent {total_recent_attachments} attachments in {config['max']}s",
23 (last_message.author,),
24 relevant_messages
25 )
26 return None
27
[end of bot/rules/attachments.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py
--- a/bot/rules/attachments.py
+++ b/bot/rules/attachments.py
@@ -7,14 +7,14 @@
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
"""Detects total attachments exceeding the limit sent by a single user."""
- relevant_messages = [last_message] + [
+ relevant_messages = tuple(
msg
for msg in recent_messages
if (
msg.author == last_message.author
and len(msg.attachments) > 0
)
- ]
+ )
total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
if total_recent_attachments > config['max']:
| {"golden_diff": "diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py\n--- a/bot/rules/attachments.py\n+++ b/bot/rules/attachments.py\n@@ -7,14 +7,14 @@\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n- relevant_messages = [last_message] + [\n+ relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n- ]\n+ )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n \n if total_recent_attachments > config['max']:\n", "issue": "Write unit tests for `bot/rules/mentions.py`\nWrite unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).\n\n## Implementation details\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\n\n## Additional information\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interesting in tackling this issue.\n\n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = [last_message] + [\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n ]\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}]} | 955 | 186 |
gh_patches_debug_5761 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Data error for Aruba
For a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check the source, https://www.webaruba.com/, and saw that the figures for electricity generation under the "Aruba's renewable energy monitor" block are showing 0 MW for everything.

When I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)

Real-time data for Aruba's electricity generation is present there but not on the front page.
</issue>
<code>
[start of parsers/AW.py]
1 #!/usr/bin/env python3
2
3 import arrow
4 import requests
5 import datetime
6
7
8 def fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 r = session or requests.session()
13 url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'
14 # User agent is mandatory or services answers 404
15 headers = {'user-agent': 'electricitymap.org'}
16 response = r.get(url, headers=headers)
17 aruba_json = response.json()
18 top_data = aruba_json['dashboard_top_data']
19
20 # Values currenlty used from service
21 fossil = top_data['Fossil']
22 wind = top_data['Wind']
23 solar = top_data['TotalSolar']
24
25 # We're using Fossil data to get timestamp in correct time zone
26 local_date_time = datetime.datetime.strptime(fossil['timestamp'], "%Y-%m-%d %H:%M:%S.%f")
27 zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')
28
29 data = {
30 'zoneKey': zone_key,
31 'datetime': zone_date_time.datetime,
32 'production': {
33 'oil': fossil['value'],
34 'wind': wind['value'],
35 'solar': solar['value'],
36 },
37 'storage': {},
38 'source': 'webaruba.com',
39 }
40
41 return data
42
43
44 if __name__ == '__main__':
45 print(fetch_production())
46
[end of parsers/AW.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/AW.py b/parsers/AW.py
--- a/parsers/AW.py
+++ b/parsers/AW.py
@@ -30,9 +30,9 @@
'zoneKey': zone_key,
'datetime': zone_date_time.datetime,
'production': {
- 'oil': fossil['value'],
- 'wind': wind['value'],
- 'solar': solar['value'],
+ 'oil': float(fossil['value']),
+ 'wind': float(wind['value']),
+ 'solar': float(solar['value']),
},
'storage': {},
'source': 'webaruba.com',
| {"golden_diff": "diff --git a/parsers/AW.py b/parsers/AW.py\n--- a/parsers/AW.py\n+++ b/parsers/AW.py\n@@ -30,9 +30,9 @@\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n- 'oil': fossil['value'],\n- 'wind': wind['value'],\n- 'solar': solar['value'],\n+ 'oil': float(fossil['value']),\n+ 'wind': float(wind['value']),\n+ 'solar': float(solar['value']),\n },\n 'storage': {},\n 'source': 'webaruba.com',\n", "issue": "Data error for Aruba\nFor a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check back on the source: https://www.webaruba.com/ and saw that the figures for electricity generation under the \"Aruba's renewable energy monitor\" block giving 0 MW for everything.\r\n\r\n\r\n\r\nWhen I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)\r\n\r\n\r\n\r\nReal-time data for Aruba's electricity generation is present there but not on the front page.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport requests\nimport datetime\n\n\ndef fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n r = session or requests.session()\n url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'\n # User agent is mandatory or services answers 404\n headers = {'user-agent': 'electricitymap.org'}\n response = r.get(url, headers=headers)\n aruba_json = response.json()\n top_data = aruba_json['dashboard_top_data']\n\n # Values currenlty used from service\n fossil = top_data['Fossil']\n wind = top_data['Wind']\n solar = top_data['TotalSolar']\n\n # We're using Fossil data to get timestamp in correct time zone\n local_date_time = datetime.datetime.strptime(fossil['timestamp'], \"%Y-%m-%d %H:%M:%S.%f\")\n zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n 'oil': fossil['value'],\n 'wind': wind['value'],\n 'solar': solar['value'],\n },\n 'storage': {},\n 'source': 'webaruba.com',\n }\n\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AW.py"}]} | 1,204 | 146 |
gh_patches_debug_49143 | rasdani/github-patches | git_diff | vyperlang__vyper-2513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tests fail with web3.py 5.21.0
### Version Information
* vyper Version (output of `vyper --version`): latest master (cff69d63)
* OS: macos
* Python Version (output of `python --version`): 3.9.6
### What's your issue about?
tests fail
tests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...
FAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...
FAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...
FAILED tests/parser/features/test_assert.py::test_assest_reason_revert
misses the string "execution reverted"
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import os
4 import subprocess
5
6 from setuptools import find_packages, setup
7
8 __version__ = "0.3.0"
9
10 extras_require = {
11 "test": [
12 "pytest>=5.4,<6.0",
13 "pytest-cov>=2.10,<3.0",
14 "pytest-instafail>=0.4,<1.0",
15 "pytest-xdist>=1.32,<2.0",
16 "eth-tester[py-evm]>=0.5.0b1,<0.6",
17 "py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
18 "web3==5.12.3",
19 "tox>=3.15,<4.0",
20 "lark-parser==0.10.0",
21 "hypothesis[lark]>=5.37.1,<6.0",
22 ],
23 "lint": [
24 "black==21.9b0",
25 "flake8==3.9.2",
26 "flake8-bugbear==20.1.4",
27 "flake8-use-fstring==1.1",
28 "isort==5.9.3",
29 "mypy==0.910",
30 ],
31 "docs": ["recommonmark", "sphinx>=3.0,<4.0", "sphinx_rtd_theme>=0.5,<0.6"],
32 "dev": ["ipython", "pre-commit", "pyinstaller", "twine"],
33 }
34
35 extras_require["dev"] = (
36 extras_require["test"] + extras_require["lint"] + extras_require["docs"] + extras_require["dev"]
37 )
38
39 hash_file_rel_path = os.path.join("vyper", "vyper_git_version.txt")
40 hashfile = os.path.relpath(hash_file_rel_path)
41
42 try:
43 commithash = subprocess.check_output("git rev-parse HEAD".split())
44 commithash_str = commithash.decode("utf-8").strip()
45 with open(hashfile, "w") as fh:
46 fh.write(f"{__version__}\n{commithash_str}")
47 except subprocess.CalledProcessError:
48 pass
49
50 with open("README.md", "r") as f:
51 long_description = f.read()
52
53 setup(
54 name="vyper",
55 version=__version__,
56 description="Vyper: the Pythonic Programming Language for the EVM",
57 long_description=long_description,
58 long_description_content_type="text/markdown",
59 author="Vyper Team",
60 author_email="",
61 url="https://github.com/vyperlang/vyper",
62 license="Apache License 2.0",
63 keywords="ethereum evm smart contract language",
64 include_package_data=True,
65 packages=find_packages(exclude=("tests", "docs")),
66 python_requires=">=3.7,<3.10",
67 py_modules=["vyper"],
68 install_requires=[
69 "asttokens==2.0.4",
70 "pycryptodome>=3.5.1,<4",
71 "semantic-version==2.8.5",
72 "cached-property==1.5.2 ; python_version<'3.8'",
73 ],
74 setup_requires=["pytest-runner"],
75 tests_require=extras_require["test"],
76 extras_require=extras_require,
77 entry_points={
78 "console_scripts": [
79 "vyper=vyper.cli.vyper_compile:_parse_cli_args",
80 "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
81 "vyper-lll=vyper.cli.vyper_lll:_parse_cli_args",
82 "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
83 ]
84 },
85 classifiers=[
86 "Intended Audience :: Developers",
87 "License :: OSI Approved :: Apache Software License",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 ],
92 data_files=[("", [hash_file_rel_path])],
93 )
94
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"pytest-xdist>=1.32,<2.0",
"eth-tester[py-evm]>=0.5.0b1,<0.6",
"py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
- "web3==5.12.3",
+ "web3==5.21.0",
"tox>=3.15,<4.0",
"lark-parser==0.10.0",
"hypothesis[lark]>=5.37.1,<6.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n- \"web3==5.12.3\",\n+ \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n", "issue": "test fail with web3.py 5.21.0\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master (cff69d63)\r\n* OS: macos\r\n* Python Version (output of `python --version`): 3.9.6\r\n\r\n### What's your issue about?\r\n\r\ntests fail\r\n\r\ntests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...\r\nFAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...\r\nFAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...\r\nFAILED tests/parser/features/test_assert.py::test_assest_reason_revert\r\n\r\nmisses the string \"execution reverted\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.12.3\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n 
\"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}]} | 1,767 | 179 |
gh_patches_debug_10810 | rasdani/github-patches | git_diff | sunpy__sunpy-4430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong plot in goes hek m25 example
<!--
We know asking good questions takes effort, and we appreciate your time.
Thank you.
Please be aware that everyone has to follow our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst
Also that these comments are hidden when you submit this github issue.
Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue!
-->
### Description
<!-- Provide a general description of the bug. -->
The plot in the timeseries example gallery of sunpy for GOES and HEK flare data looks very different when the same code is run on master.
### Expected behavior
<!-- What did you expect to happen. -->
It should look like this
https://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png
### Actual behavior
<!--
What actually happened.
Was the output confusing or poorly described?
-->

### Steps to Reproduce
<!--
Please include **code** that reproduces the issue whenever possible.
The best reproductions are self-contained scripts with minimal dependencies.
-->
https://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.
### System Details
<!--
We at least need to know the sunpy version you are using.
We provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.
This step is optional but strongly recommended.
-->
- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error
- Astropy Version: 4.0.1.post1
- Python Version: 3.8.5, 3.7.5 both were giving error
- OS information: Ubuntu 18.04 LTS
</issue>
<code>
[start of examples/time_series/goes_hek_m25.py]
1 """
2 ==============================
3 Flare times on a GOES XRS plot
4 ==============================
5
6 How to plot flare times as provided by the HEK on a GOES XRS plot.
7 """
8 import matplotlib.pyplot as plt
9
10 from sunpy.net import Fido
11 from sunpy.net import attrs as a
12 from sunpy.net import hek
13 from sunpy.time import TimeRange, parse_time
14 from sunpy.timeseries import TimeSeries
15
16 ###############################################################################
17 # Let's first grab GOES XRS data for a particular time of interest
18 tr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])
19 results = Fido.search(a.Time(tr), a.Instrument.xrs)
20
21 ###############################################################################
22 # Then download the data and load it into a TimeSeries
23 files = Fido.fetch(results)
24 goes = TimeSeries(files)
25
26 ###############################################################################
27 # Next lets grab the HEK flare data for this time from the NOAA Space Weather
28 # Prediction Center (SWPC)
29 client = hek.HEKClient()
30 flares_hek = client.search(hek.attrs.Time(tr.start, tr.end),
31 hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')
32
33 ###############################################################################
34 # Lets plot everything together
35 fig, ax = plt.subplots()
36 goes.plot()
37 ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
38 ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
39 parse_time(flares_hek[0].get('event_endtime')).plot_date,
40 alpha=0.2, label=flares_hek[0].get('fl_goescls'))
41 ax.legend(loc=2)
42 ax.set_yscale('log')
43 plt.show()
44
[end of examples/time_series/goes_hek_m25.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py
--- a/examples/time_series/goes_hek_m25.py
+++ b/examples/time_series/goes_hek_m25.py
@@ -34,9 +34,9 @@
# Lets plot everything together
fig, ax = plt.subplots()
goes.plot()
-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
- parse_time(flares_hek[0].get('event_endtime')).plot_date,
+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)
+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,
+ parse_time(flares_hek[0].get('event_endtime')).datetime,
alpha=0.2, label=flares_hek[0].get('fl_goescls'))
ax.legend(loc=2)
ax.set_yscale('log')
| {"golden_diff": "diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py\n--- a/examples/time_series/goes_hek_m25.py\n+++ b/examples/time_series/goes_hek_m25.py\n@@ -34,9 +34,9 @@\n # Lets plot everything together\n fig, ax = plt.subplots()\n goes.plot()\n-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\n-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n- parse_time(flares_hek[0].get('event_endtime')).plot_date,\n+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)\n+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,\n+ parse_time(flares_hek[0].get('event_endtime')).datetime,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\n ax.legend(loc=2)\n ax.set_yscale('log')\n", "issue": "Wrong plot in goes hek m25 example\n<!--\r\nWe know asking good questions takes effort, and we appreciate your time.\r\nThank you.\r\n\r\nPlease be aware that everyone has to follow our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst\r\n\r\nAlso that these comments are hidden when you submit this github issue.\r\n\r\nPlease have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue!\r\n-->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nThe plot in timeseries example gallery of sunpy for goes and hek flare data looks very different when same code is run on master.\r\n\r\n### Expected behavior\r\n<!-- What did you expect to happen. -->\r\nIt should look like this\r\nhttps://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png\r\n\r\n### Actual behavior\r\n<!--\r\nWhat actually happened.\r\nWas the output confusing or poorly described?\r\n-->\r\n\r\n### Steps to Reproduce\r\n<!--\r\nPlease include **code** that reproduces the issue whenever possible.\r\nThe best reproductions are self-contained scripts with minimal dependencies.\r\n-->\r\n\r\nhttps://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.\r\n\r\n### System Details\r\n<!--\r\nWe at least need to know the sunpy version you are using.\r\nWe provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.\r\nThis step is optional but strongly recommended.\r\n-->\r\n- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error\r\n- Astropy Version: 4.0.1.post1\r\n- Python Version: 3.8.5, 3.7.5 both were giving error\r\n- OS information: Ubuntu 18.04 LTS\r\n\n", "before_files": [{"content": "\"\"\"\n==============================\nFlare times on a GOES XRS plot\n==============================\n\nHow to plot flare times as provided by the HEK on a GOES XRS plot.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\nfrom sunpy.net import hek\nfrom sunpy.time import TimeRange, parse_time\nfrom sunpy.timeseries import TimeSeries\n\n###############################################################################\n# Let's first grab GOES XRS data for a particular time of interest\ntr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])\nresults = Fido.search(a.Time(tr), a.Instrument.xrs)\n\n###############################################################################\n# Then download the data and load it into a 
TimeSeries\nfiles = Fido.fetch(results)\ngoes = TimeSeries(files)\n\n###############################################################################\n# Next lets grab the HEK flare data for this time from the NOAA Space Weather\n# Prediction Center (SWPC)\nclient = hek.HEKClient()\nflares_hek = client.search(hek.attrs.Time(tr.start, tr.end),\n hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')\n\n###############################################################################\n# Lets plot everything together\nfig, ax = plt.subplots()\ngoes.plot()\nax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\nax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n parse_time(flares_hek[0].get('event_endtime')).plot_date,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\nax.legend(loc=2)\nax.set_yscale('log')\nplt.show()\n", "path": "examples/time_series/goes_hek_m25.py"}]} | 1,523 | 256 |
gh_patches_debug_13115 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
</issue>
<code>
[start of bit_manipulation/missing_number.py]
1 def find_missing_number(nums: list[int]) -> int:
2 """
3 Finds the missing number in a list of consecutive integers.
4
5 Args:
6 nums: A list of integers.
7
8 Returns:
9 The missing number.
10
11 Example:
12 >>> find_missing_number([0, 1, 3, 4])
13 2
14 >>> find_missing_number([1, 3, 4, 5, 6])
15 2
16 >>> find_missing_number([6, 5, 4, 2, 1])
17 3
18 >>> find_missing_number([6, 1, 5, 3, 4])
19 2
20 """
21 low = min(nums)
22 high = max(nums)
23 missing_number = high
24
25 for i in range(low, high):
26 missing_number ^= i ^ nums[i - low]
27
28 return missing_number
29
[end of bit_manipulation/missing_number.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,6 +11,12 @@
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([4, 3, 1, 0])
+ 2
+ >>> find_missing_number([-4, -3, -1, 0])
+ -2
+ >>> find_missing_number([-2, 2, 1, 3, 0])
+ -1
>>> find_missing_number([1, 3, 4, 5, 6])
2
>>> find_missing_number([6, 5, 4, 2, 1])
@@ -26,3 +32,9 @@
missing_number ^= i ^ nums[i - low]
return missing_number
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| {"golden_diff": "diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py\n--- a/bit_manipulation/missing_number.py\n+++ b/bit_manipulation/missing_number.py\n@@ -11,6 +11,12 @@\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n+ >>> find_missing_number([4, 3, 1, 0])\n+ 2\n+ >>> find_missing_number([-4, -3, -1, 0])\n+ -2\n+ >>> find_missing_number([-2, 2, 1, 3, 0])\n+ -1\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n@@ -26,3 +32,9 @@\n missing_number ^= i ^ nums[i - low]\n \n return missing_number\n+\n+\n+if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. 
If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n 3\n >>> find_missing_number([6, 1, 5, 3, 4])\n 2\n \"\"\"\n low = min(nums)\n high = max(nums)\n missing_number = high\n\n for i in range(low, high):\n missing_number ^= i ^ nums[i - low]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}]} | 1,623 | 252 |
gh_patches_debug_18480 | rasdani/github-patches | git_diff | litestar-org__litestar-1718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: template autoescaping behavior
### Summary
I appreciate this framework having a built-in choice between Jinja and Mako. The documentation however makes no mention of a significant difference in the Litestar behavior between the two -- that using the Jinja engine will autoescape for you, whereas Mako will not.
</issue>
<code>
[start of litestar/contrib/mako.py]
1 from __future__ import annotations
2
3 from functools import partial
4 from typing import TYPE_CHECKING, Any, Callable
5
6 from litestar.exceptions import MissingDependencyException, TemplateNotFoundException
7 from litestar.template.base import (
8 TemplateEngineProtocol,
9 TemplateProtocol,
10 csrf_token,
11 url_for,
12 url_for_static_asset,
13 )
14
15 __all__ = ("MakoTemplate", "MakoTemplateEngine")
16
17
18 try:
19 import mako # noqa: F401
20 except ImportError as e:
21 raise MissingDependencyException("mako") from e
22
23
24 from mako.exceptions import TemplateLookupException as MakoTemplateNotFound
25 from mako.lookup import TemplateLookup
26
27 if TYPE_CHECKING:
28 from mako.template import Template as _MakoTemplate
29 from pydantic import DirectoryPath
30
31
32 class MakoTemplate(TemplateProtocol):
33 """Mako template, implementing ``TemplateProtocol``"""
34
35 def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):
36 """Initialize a template.
37
38 Args:
39 template: Base ``MakoTemplate`` used by the underlying mako-engine
40 template_callables: List of callables passed to the template
41 """
42 super().__init__()
43 self.template = template
44 self.template_callables = template_callables
45
46 def render(self, *args: Any, **kwargs: Any) -> str:
47 """Render a template.
48
49 Args:
50 args: Positional arguments passed to the engines ``render`` function
51 kwargs: Keyword arguments passed to the engines ``render`` function
52
53 Returns:
54 Rendered template as a string
55 """
56 for callable_key, template_callable in self.template_callables:
57 kwargs_copy = {**kwargs}
58 kwargs[callable_key] = partial(template_callable, kwargs_copy)
59
60 return str(self.template.render(*args, **kwargs))
61
62
63 class MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):
64 """Mako based TemplateEngine."""
65
66 def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:
67 """Initialize template engine.
68
69 Args:
70 directory: Direct path or list of directory paths from which to serve templates.
71 """
72 super().__init__(directory=directory)
73 self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
74 self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
75 self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
76 self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
77 self.register_template_callable(key="url_for", template_callable=url_for) # type: ignore
78
79 def get_template(self, template_name: str) -> MakoTemplate:
80 """Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.
81
82 Args:
83 template_name: A dotted path
84
85 Returns:
86 MakoTemplate instance
87
88 Raises:
89 TemplateNotFoundException: if no template is found.
90 """
91 try:
92 return MakoTemplate(
93 template=self.engine.get_template(template_name), template_callables=self._template_callables
94 )
95 except MakoTemplateNotFound as exc:
96 raise TemplateNotFoundException(template_name=template_name) from exc
97
98 def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:
99 """Register a callable on the template engine.
100
101 Args:
102 key: The callable key, i.e. the value to use inside the template to call the callable.
103 template_callable: A callable to register.
104
105 Returns:
106 None
107 """
108 self._template_callables.append((key, template_callable))
109
[end of litestar/contrib/mako.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py
--- a/litestar/contrib/mako.py
+++ b/litestar/contrib/mako.py
@@ -70,7 +70,9 @@
directory: Direct path or list of directory paths from which to serve templates.
"""
super().__init__(directory=directory)
- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
+ self.engine = TemplateLookup(
+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=["h"]
+ )
self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
| {"golden_diff": "diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py\n--- a/litestar/contrib/mako.py\n+++ b/litestar/contrib/mako.py\n@@ -70,7 +70,9 @@\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n+ self.engine = TemplateLookup(\n+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=[\"h\"]\n+ )\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n", "issue": "Docs: template autoescaping behavior\n### Summary\r\n\r\nI appreciate this framework having a built-in choice between Jinja and Mako. The documentation however makes no mention of a significant difference in the Litestar behavior between the two -- that using the Jinja engine will autoescape for you, whereas Mako will not. \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom litestar.exceptions import MissingDependencyException, TemplateNotFoundException\nfrom litestar.template.base import (\n TemplateEngineProtocol,\n TemplateProtocol,\n csrf_token,\n url_for,\n url_for_static_asset,\n)\n\n__all__ = (\"MakoTemplate\", \"MakoTemplateEngine\")\n\n\ntry:\n import mako # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"mako\") from e\n\n\nfrom mako.exceptions import TemplateLookupException as MakoTemplateNotFound\nfrom mako.lookup import TemplateLookup\n\nif TYPE_CHECKING:\n from mako.template import Template as _MakoTemplate\n from pydantic import DirectoryPath\n\n\nclass MakoTemplate(TemplateProtocol):\n \"\"\"Mako template, implementing ``TemplateProtocol``\"\"\"\n\n def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):\n \"\"\"Initialize a template.\n\n Args:\n template: Base ``MakoTemplate`` used by the underlying mako-engine\n template_callables: List of callables passed to the template\n \"\"\"\n super().__init__()\n self.template = template\n self.template_callables = template_callables\n\n def render(self, *args: Any, **kwargs: Any) -> str:\n \"\"\"Render a template.\n\n Args:\n args: Positional arguments passed to the engines ``render`` function\n kwargs: Keyword arguments passed to the engines ``render`` function\n\n Returns:\n Rendered template as a string\n \"\"\"\n for callable_key, template_callable in self.template_callables:\n kwargs_copy = {**kwargs}\n kwargs[callable_key] = partial(template_callable, kwargs_copy)\n\n return str(self.template.render(*args, **kwargs))\n\n\nclass MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):\n \"\"\"Mako based TemplateEngine.\"\"\"\n\n def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:\n \"\"\"Initialize template engine.\n\n Args:\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n 
self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n self.register_template_callable(key=\"url_for\", template_callable=url_for) # type: ignore\n\n def get_template(self, template_name: str) -> MakoTemplate:\n \"\"\"Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.\n\n Args:\n template_name: A dotted path\n\n Returns:\n MakoTemplate instance\n\n Raises:\n TemplateNotFoundException: if no template is found.\n \"\"\"\n try:\n return MakoTemplate(\n template=self.engine.get_template(template_name), template_callables=self._template_callables\n )\n except MakoTemplateNotFound as exc:\n raise TemplateNotFoundException(template_name=template_name) from exc\n\n def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:\n \"\"\"Register a callable on the template engine.\n\n Args:\n key: The callable key, i.e. the value to use inside the template to call the callable.\n template_callable: A callable to register.\n\n Returns:\n None\n \"\"\"\n self._template_callables.append((key, template_callable))\n", "path": "litestar/contrib/mako.py"}]} | 1,654 | 214 |
gh_patches_debug_27060 | rasdani/github-patches | git_diff | svthalia__concrexit-1399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documents page broken
Steps to reproduce:
1. Go to https://thalia.nu/association/documents/
Sentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)
```
FieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name
(5 additional frame(s) were not displayed)
...
File "documents/views.py", line 54, in get_context_data
"association_documents": AssociationDocument.objects.order_by(
File "django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "django/db/models/query.py", line 1134, in order_by
obj.query.add_ordering(*field_names)
File "django/db/models/sql/query.py", line 1919, in add_ordering
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
File "django/db/models/sql/query.py", line 1481, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
```
</issue>
<code>
[start of website/documents/views.py]
1 """Views provided by the documents package"""
2 import os
3
4 from django.conf import settings
5 from django.core.exceptions import PermissionDenied
6 from django.http import Http404, HttpResponse
7 from django.shortcuts import redirect
8 from django.utils import timezone
9 from django.utils.text import slugify
10 from django.utils.translation import get_language
11 from django.views.generic import TemplateView, DetailView
12 from django_sendfile import sendfile
13
14 from documents.models import (
15 AnnualDocument,
16 AssociationDocument,
17 GeneralMeeting,
18 Document,
19 )
20 from utils.snippets import datetime_to_lectureyear
21
22
23 class DocumentsIndexView(TemplateView):
24 """
25 View that renders the documents index page
26 """
27
28 template_name = "documents/index.html"
29
30 def get_context_data(self, **kwargs) -> dict:
31 lecture_year = datetime_to_lectureyear(timezone.now())
32
33 years = {x: {} for x in reversed(range(1990, lecture_year + 1))}
34 for year in years:
35 years[year] = {
36 "documents": {"policy": None, "report": None, "financial": None},
37 "general_meetings": [],
38 }
39
40 for document in AnnualDocument.objects.filter(subcategory="policy"):
41 years[document.year]["documents"]["policy"] = document
42 for document in AnnualDocument.objects.filter(subcategory="report"):
43 years[document.year]["documents"]["report"] = document
44 for document in AnnualDocument.objects.filter(subcategory="financial"):
45 years[document.year]["documents"]["financial"] = document
46
47 for obj in GeneralMeeting.objects.all():
48 meeting_year = datetime_to_lectureyear(obj.datetime)
49 years[meeting_year]["general_meetings"].append(obj)
50
51 context = super().get_context_data(**kwargs)
52 context.update(
53 {
54 "association_documents": AssociationDocument.objects.order_by(
55 f"name_{get_language()}"
56 ).all(),
57 "years": list(years.items()),
58 }
59 )
60 return context
61
62
63 class DocumentDownloadView(DetailView):
64 """
65 View that allows you to download a specific document based on it's and your
66 permissions settings
67 """
68
69 model = Document
70
71 def get(self, request, *args, **kwargs) -> HttpResponse:
72 """
73 :return: either a 302 redirect to the login page or
74 a 200 with the document
75 """
76 response = super().get(request, *args, **kwargs)
77 document = response.context_data["document"]
78
79 if document.members_only and not request.user.is_authenticated:
80 return redirect("{}?next={}".format(settings.LOGIN_URL, request.path))
81 if document.members_only and not request.member.has_active_membership():
82 raise PermissionDenied
83
84 lang = request.GET.get("language")
85 try:
86 if lang == "en":
87 file = document.file_en
88 else: # Fall back on language detection
89 file = document.file
90 except ValueError as e:
91 raise Http404("This document does not exist.") from e
92
93 ext = os.path.splitext(file.path)[1]
94
95 return sendfile(
96 request,
97 file.path,
98 attachment=True,
99 attachment_filename=slugify(document.name) + ext,
100 )
101
[end of website/documents/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/documents/views.py b/website/documents/views.py
--- a/website/documents/views.py
+++ b/website/documents/views.py
@@ -7,7 +7,6 @@
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.text import slugify
-from django.utils.translation import get_language
from django.views.generic import TemplateView, DetailView
from django_sendfile import sendfile
@@ -52,7 +51,7 @@
context.update(
{
"association_documents": AssociationDocument.objects.order_by(
- f"name_{get_language()}"
+ "name"
).all(),
"years": list(years.items()),
}
@@ -81,12 +80,8 @@
if document.members_only and not request.member.has_active_membership():
raise PermissionDenied
- lang = request.GET.get("language")
try:
- if lang == "en":
- file = document.file_en
- else: # Fall back on language detection
- file = document.file
+ file = document.file
except ValueError as e:
raise Http404("This document does not exist.") from e
| {"golden_diff": "diff --git a/website/documents/views.py b/website/documents/views.py\n--- a/website/documents/views.py\n+++ b/website/documents/views.py\n@@ -7,7 +7,6 @@\n from django.shortcuts import redirect\n from django.utils import timezone\n from django.utils.text import slugify\n-from django.utils.translation import get_language\n from django.views.generic import TemplateView, DetailView\n from django_sendfile import sendfile\n \n@@ -52,7 +51,7 @@\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n- f\"name_{get_language()}\"\n+ \"name\"\n ).all(),\n \"years\": list(years.items()),\n }\n@@ -81,12 +80,8 @@\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n \n- lang = request.GET.get(\"language\")\n try:\n- if lang == \"en\":\n- file = document.file_en\n- else: # Fall back on language detection\n- file = document.file\n+ file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n", "issue": "Documents page broken\nSteps to reproduce:\n1. Go to https://thalia.nu/association/documents/\n\n\nSentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name\n(5 additional frame(s) were not displayed)\n...\n File \"documents/views.py\", line 54, in get_context_data\n \"association_documents\": AssociationDocument.objects.order_by(\n File \"django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"django/db/models/query.py\", line 1134, in order_by\n obj.query.add_ordering(*field_names)\n File \"django/db/models/sql/query.py\", line 1919, in add_ordering\n self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)\n File \"django/db/models/sql/query.py\", line 1481, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. 
\"\n```\n", "before_files": [{"content": "\"\"\"Views provided by the documents package\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.utils.translation import get_language\nfrom django.views.generic import TemplateView, DetailView\nfrom django_sendfile import sendfile\n\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n GeneralMeeting,\n Document,\n)\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentsIndexView(TemplateView):\n \"\"\"\n View that renders the documents index page\n \"\"\"\n\n template_name = \"documents/index.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n lecture_year = datetime_to_lectureyear(timezone.now())\n\n years = {x: {} for x in reversed(range(1990, lecture_year + 1))}\n for year in years:\n years[year] = {\n \"documents\": {\"policy\": None, \"report\": None, \"financial\": None},\n \"general_meetings\": [],\n }\n\n for document in AnnualDocument.objects.filter(subcategory=\"policy\"):\n years[document.year][\"documents\"][\"policy\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"report\"):\n years[document.year][\"documents\"][\"report\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"financial\"):\n years[document.year][\"documents\"][\"financial\"] = document\n\n for obj in GeneralMeeting.objects.all():\n meeting_year = datetime_to_lectureyear(obj.datetime)\n years[meeting_year][\"general_meetings\"].append(obj)\n\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n f\"name_{get_language()}\"\n ).all(),\n \"years\": list(years.items()),\n }\n )\n return context\n\n\nclass DocumentDownloadView(DetailView):\n \"\"\"\n View that allows you to download a specific document based on it's and your\n permissions settings\n \"\"\"\n\n model = Document\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n \"\"\"\n :return: either a 302 redirect to the login page or\n a 200 with the document\n \"\"\"\n response = super().get(request, *args, **kwargs)\n document = response.context_data[\"document\"]\n\n if document.members_only and not request.user.is_authenticated:\n return redirect(\"{}?next={}\".format(settings.LOGIN_URL, request.path))\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n\n lang = request.GET.get(\"language\")\n try:\n if lang == \"en\":\n file = document.file_en\n else: # Fall back on language detection\n file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n\n ext = os.path.splitext(file.path)[1]\n\n return sendfile(\n request,\n file.path,\n attachment=True,\n attachment_filename=slugify(document.name) + ext,\n )\n", "path": "website/documents/views.py"}]} | 1,703 | 256 |
gh_patches_debug_17927 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issues when running on mac (silicon)
## Description
I'm facing these issues while trying to install using the latest `install.sh` script from master. I'm running macOS Big Sur on a Mac with an Apple M1 processor.
1. Error thrown, while the line `sudo docker compose --profile prod up -d --wait` runs in the install script. Any command with `sudo docker` throws an error.
```
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
[+] Running 0/0
⠋ watchtower Pulling 0.1s
⠋ db Pulling 0.1s
⠋ caddy-reverse-proxy Pulling 0.1s
⠋ service Pulling 0.1s
error getting credentials - err: exit status 1, out: ``
```
    This is because Docker cannot run as root (or with sudo privileges) on macOS.
    If possible, we should generally avoid `sudo` while running on a Mac.
2. The images don't run after downloading because the platforms do not match.
```
⠙ caddy-reverse-proxy The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s
⠿ Container mathesar_service Waiting 19.1s
⠏ service The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s
container for service "service" exited (3)
```
We should be publishing an arm image along with the existing amd image. I sent a mail regarding this.
3. Installation fails because wget is not installed by default. We need to check if it is present during installation.
4. Startup (i.e. `docker compose --profile prod up -d --wait`) fails because `SECRET_KEY` in `.env` file is empty.
</issue>
<code>
[start of db/install.py]
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.types import install
6
7
8 def install_mathesar(
9 database_name, username, password, hostname, port, skip_confirm
10 ):
11 """Create database and install Mathesar on it."""
12 user_db_engine = engine.create_future_engine(
13 username, password, hostname, database_name, port
14 )
15 try:
16 user_db_engine.connect()
17 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
18 install.install_mathesar_on_database(user_db_engine)
19 user_db_engine.dispose()
20 except OperationalError:
21 database_created = _create_database(
22 database_name=database_name,
23 hostname=hostname,
24 username=username,
25 password=password,
26 port=port,
27 skip_confirm=skip_confirm
28 )
29 if database_created:
30 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
31 install.install_mathesar_on_database(user_db_engine)
32 user_db_engine.dispose()
33 else:
34 print(f"Skipping installing on DB with key {database_name}.")
35
36
37 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
38 if skip_confirm is True:
39 create_database = "y"
40 else:
41 create_database = input(
42 f"Create a new Database called {database_name}? (y/n) > "
43 )
44 if create_database.lower() in ["y", "yes"]:
45 # We need to connect to an existing database inorder to create a new Database.
46 # So we use the default Database `postgres` that comes with postgres.
47 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
48 root_database = "postgres"
49 root_db_engine = engine.create_future_engine(
50 username, password, hostname, root_database, port,
51 )
52 with root_db_engine.connect() as conn:
53 conn.execution_options(isolation_level="AUTOCOMMIT")
54 conn.execute(text(f"CREATE DATABASE {database_name}"))
55 root_db_engine.dispose()
56 print(f"Created DB is {database_name}.")
57 return True
58 else:
59 print(f"Database {database_name} not created!")
60 return False
61
[end of db/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -10,7 +10,8 @@
):
"""Create database and install Mathesar on it."""
user_db_engine = engine.create_future_engine(
- username, password, hostname, database_name, port
+ username, password, hostname, database_name, port,
+ connect_args={"connect_timeout": 10}
)
try:
user_db_engine.connect()
@@ -48,6 +49,7 @@
root_database = "postgres"
root_db_engine = engine.create_future_engine(
username, password, hostname, root_database, port,
+ connect_args={"connect_timeout": 10}
)
with root_db_engine.connect() as conn:
conn.execution_options(isolation_level="AUTOCOMMIT")
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -10,7 +10,8 @@\n ):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n- username, password, hostname, database_name, port\n+ username, password, hostname, database_name, port,\n+ connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n@@ -48,6 +49,7 @@\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n+ connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n", "issue": "Installation issues when running on mac (silicon)\n## Description\r\nI'm facing these issues while trying to install using the latest `install.sh` script from master. I'm running 'macOS Big Sur', on a mac with Apple M1 processor.\r\n\r\n1. Error thrown, while the line `sudo docker compose --profile prod up -d --wait` runs in the install script. Any command with `sudo docker` throws an error.\r\n\r\n ```\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n [+] Running 0/0\r\n \u280b watchtower Pulling 0.1s\r\n \u280b db Pulling 0.1s\r\n \u280b caddy-reverse-proxy Pulling 0.1s\r\n \u280b service Pulling 0.1s\r\n error getting credentials - err: exit status 1, out: ``\r\n ``` \r\n This is because docker cannot run as root (or with sudo privileges) in mac.\r\n If possible, we should avoid `sudo` generally, while running on a mac.\r\n\r\n2. The images don't run after downloading because the platforms do not match.\r\n ```\r\n \u2819 caddy-reverse-proxy The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s\r\n \u283f Container mathesar_service Waiting 19.1s\r\n \u280f service The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s\r\n container for service \"service\" exited (3)\r\n ```\r\n We should be publishing an arm image along with the existing amd image. I sent a mail regarding this.\r\n\r\n3. Installation fails because wget is not installed by default. We need to check if it is present during installation.\r\n\r\n4. Startup (i.e. 
`docker compose --profile prod up -d --wait`) fails because `SECRET_KEY` in `.env` file is empty.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,714 | 186 |
gh_patches_debug_31573 | rasdani/github-patches | git_diff | litestar-org__litestar-1483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of starlite/cli/commands/core.py]
1 import inspect
2
3 from click import command, option
4 from rich.tree import Tree
5
6 from starlite import HTTPRoute, Starlite, WebSocketRoute
7 from starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info
8 from starlite.utils.helpers import unwrap_partial
9
10
11 @command(name="info")
12 def info_command(app: Starlite) -> None:
13 """Show information about the detected Starlite app."""
14
15 show_app_info(app)
16
17
18 @command(name="run")
19 @option("-r", "--reload", help="Reload server on changes", default=False, is_flag=True)
20 @option("-p", "--port", help="Serve under this port", type=int, default=8000, show_default=True)
21 @option("--host", help="Server under this host", default="127.0.0.1", show_default=True)
22 @option("--debug", help="Run app in debug mode", is_flag=True)
23 def run_command(
24 reload: bool,
25 port: int,
26 host: str,
27 debug: bool,
28 env: StarliteEnv,
29 app: Starlite,
30 ) -> None:
31 """Run a Starlite app.
32
33 The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,
34 set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these
35 canonical paths: app.py, asgi.py, application.py or app/__init__.py. When autodiscovering application factories,
36 functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``
37 instance.
38 """
39
40 try:
41 import uvicorn
42 except ImportError:
43 raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
44
45 if debug or env.debug:
46 app.debug = True
47
48 show_app_info(app)
49
50 console.rule("[yellow]Starting server process", align="left")
51
52 uvicorn.run(
53 env.app_path,
54 reload=env.reload or reload,
55 host=env.host or host,
56 port=env.port or port,
57 factory=env.is_app_factory,
58 )
59
60
61 @command(name="routes")
62 def routes_command(app: Starlite) -> None: # pragma: no cover
63 """Display information about the application's routes."""
64
65 tree = Tree("", hide_root=True)
66
67 for route in sorted(app.routes, key=lambda r: r.path):
68 if isinstance(route, HTTPRoute):
69 branch = tree.add(f"[green]{route.path}[/green] (HTTP)")
70 for handler in route.route_handlers:
71 handler_info = [
72 f"[blue]{handler.name or handler.handler_name}[/blue]",
73 ]
74
75 if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):
76 handler_info.append("[magenta]async[/magenta]")
77 else:
78 handler_info.append("[yellow]sync[/yellow]")
79
80 handler_info.append(f'[cyan]{", ".join(sorted(handler.http_methods))}[/cyan]')
81
82 if len(handler.paths) > 1:
83 for path in handler.paths:
84 branch.add(" ".join([f"[green]{path}[green]", *handler_info]))
85 else:
86 branch.add(" ".join(handler_info))
87
88 else:
89 if isinstance(route, WebSocketRoute):
90 route_type = "WS"
91 else:
92 route_type = "ASGI"
93 branch = tree.add(f"[green]{route.path}[/green] ({route_type})")
94 branch.add(f"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]")
95
96 console.print(tree)
97
[end of starlite/cli/commands/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py
--- a/starlite/cli/commands/core.py
+++ b/starlite/cli/commands/core.py
@@ -1,4 +1,6 @@
import inspect
+import subprocess
+from typing import Any, Dict, List
from click import command, option
from rich.tree import Tree
@@ -8,6 +10,18 @@
from starlite.utils.helpers import unwrap_partial
+def _convert_uvicorn_args(args: Dict[str, Any]) -> List[str]:
+ process_args = []
+ for arg, value in args.items():
+ if isinstance(value, bool):
+ if value:
+ process_args.append(f"--{arg}")
+ else:
+ process_args.append(f"--{arg}={value}")
+
+ return process_args
+
+
@command(name="info")
def info_command(app: Starlite) -> None:
"""Show information about the detected Starlite app."""
@@ -38,24 +52,24 @@
"""
try:
- import uvicorn
+ import uvicorn # noqa: F401
except ImportError:
raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
if debug or env.debug:
app.debug = True
- show_app_info(app)
+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see
+ # https://github.com/litestar-org/litestar/issues/1191 and https://github.com/encode/uvicorn/issues/1045
- console.rule("[yellow]Starting server process", align="left")
+ process_args = {
+ "reload": env.reload or reload,
+ "host": env.host or host,
+ "port": env.port or port,
+ "factory": env.is_app_factory,
+ }
- uvicorn.run(
- env.app_path,
- reload=env.reload or reload,
- host=env.host or host,
- port=env.port or port,
- factory=env.is_app_factory,
- )
+ subprocess.run(["uvicorn", env.app_path, *_convert_uvicorn_args(process_args)], check=True)
@command(name="routes")
| {"golden_diff": "diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py\n--- a/starlite/cli/commands/core.py\n+++ b/starlite/cli/commands/core.py\n@@ -1,4 +1,6 @@\n import inspect\n+import subprocess\n+from typing import Any, Dict, List\n \n from click import command, option\n from rich.tree import Tree\n@@ -8,6 +10,18 @@\n from starlite.utils.helpers import unwrap_partial\n \n \n+def _convert_uvicorn_args(args: Dict[str, Any]) -> List[str]:\n+ process_args = []\n+ for arg, value in args.items():\n+ if isinstance(value, bool):\n+ if value:\n+ process_args.append(f\"--{arg}\")\n+ else:\n+ process_args.append(f\"--{arg}={value}\")\n+\n+ return process_args\n+\n+\n @command(name=\"info\")\n def info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n@@ -38,24 +52,24 @@\n \"\"\"\n \n try:\n- import uvicorn\n+ import uvicorn # noqa: F401\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n \n if debug or env.debug:\n app.debug = True\n \n- show_app_info(app)\n+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see\n+ # https://github.com/litestar-org/litestar/issues/1191 and https://github.com/encode/uvicorn/issues/1045\n \n- console.rule(\"[yellow]Starting server process\", align=\"left\")\n+ process_args = {\n+ \"reload\": env.reload or reload,\n+ \"host\": env.host or host,\n+ \"port\": env.port or port,\n+ \"factory\": env.is_app_factory,\n+ }\n \n- uvicorn.run(\n- env.app_path,\n- reload=env.reload or reload,\n- host=env.host or host,\n- port=env.port or port,\n- factory=env.is_app_factory,\n- )\n+ subprocess.run([\"uvicorn\", env.app_path, *_convert_uvicorn_args(process_args)], check=True)\n \n \n @command(name=\"routes\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "import inspect\n\nfrom click import command, option\nfrom rich.tree import Tree\n\nfrom starlite import HTTPRoute, Starlite, WebSocketRoute\nfrom starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info\nfrom starlite.utils.helpers import unwrap_partial\n\n\n@command(name=\"info\")\ndef info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n\n show_app_info(app)\n\n\n@command(name=\"run\")\n@option(\"-r\", \"--reload\", help=\"Reload server on changes\", default=False, is_flag=True)\n@option(\"-p\", \"--port\", help=\"Serve under this port\", type=int, default=8000, show_default=True)\n@option(\"--host\", help=\"Server under this host\", default=\"127.0.0.1\", show_default=True)\n@option(\"--debug\", help=\"Run app in debug mode\", is_flag=True)\ndef run_command(\n reload: bool,\n port: int,\n host: str,\n debug: bool,\n env: StarliteEnv,\n app: Starlite,\n) -> None:\n \"\"\"Run a Starlite app.\n\n The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,\n set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these\n canonical paths: app.py, asgi.py, application.py or app/__init__.py. When autodiscovering application factories,\n functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``\n instance.\n \"\"\"\n\n try:\n import uvicorn\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n\n if debug or env.debug:\n app.debug = True\n\n show_app_info(app)\n\n console.rule(\"[yellow]Starting server process\", align=\"left\")\n\n uvicorn.run(\n env.app_path,\n reload=env.reload or reload,\n host=env.host or host,\n port=env.port or port,\n factory=env.is_app_factory,\n )\n\n\n@command(name=\"routes\")\ndef routes_command(app: Starlite) -> None: # pragma: no cover\n \"\"\"Display information about the application's routes.\"\"\"\n\n tree = Tree(\"\", hide_root=True)\n\n for route in sorted(app.routes, key=lambda r: r.path):\n if isinstance(route, HTTPRoute):\n branch = tree.add(f\"[green]{route.path}[/green] (HTTP)\")\n for handler in route.route_handlers:\n handler_info = [\n f\"[blue]{handler.name or handler.handler_name}[/blue]\",\n ]\n\n if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):\n handler_info.append(\"[magenta]async[/magenta]\")\n else:\n handler_info.append(\"[yellow]sync[/yellow]\")\n\n handler_info.append(f'[cyan]{\", \".join(sorted(handler.http_methods))}[/cyan]')\n\n if len(handler.paths) > 1:\n for path in handler.paths:\n branch.add(\" \".join([f\"[green]{path}[green]\", *handler_info]))\n else:\n branch.add(\" \".join(handler_info))\n\n else:\n if isinstance(route, WebSocketRoute):\n route_type = \"WS\"\n else:\n route_type = \"ASGI\"\n branch = tree.add(f\"[green]{route.path}[/green] ({route_type})\")\n branch.add(f\"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]\")\n\n console.print(tree)\n", "path": "starlite/cli/commands/core.py"}]} | 1,683 | 514 |
gh_patches_debug_67477 | rasdani/github-patches | git_diff | scverse__scanpy-721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Give `external` higher billing in the docs?
At the moment external modules are kind of hidden in the docs. I think it'd be worth making them more visible (at least on the same page as everything else). I've been giving this a shot, but have hit the limit of my sphinx/rst abilities.
Two ideas for how they could be more discoverable:
* They get their own heading under `api`
* They're mixed in with everything else (so everything stays organized by topic), but their names are prepended with `sce` while scanpy functions are prepended with `sc`.
</issue>
<code>
[start of scanpy/external/__init__.py]
1 from . import tl
2 from . import pl
3 from . import pp
4
5 from .. import _exporting as exporting
6
7 import sys
8 from .. import utils
9 utils.annotate_doc_types(sys.modules[__name__], 'scanpy')
10 del sys, utils
11
12
13 __doc__ = """\
14 External API
15 ============
16
17
18 Import Scanpy's wrappers to external tools as::
19
20 import scanpy.external as sce
21
22 Preprocessing: PP
23 ------------------
24
25 Batch effect correction
26 ~~~~~~~~~~~~~~~~~~~~~~~
27
28 .. autosummary::
29 :toctree: .
30
31 pp.bbknn
32 pp.mnn_correct
33
34 Imputation
35 ~~~~~~~~~~
36
37 Note that the fundamental limitations of imputation are still under `debate
38 <https://github.com/theislab/scanpy/issues/189>`__.
39
40 .. autosummary::
41 :toctree: .
42
43 pp.dca
44 pp.magic
45
46
47 Tools: TL
48 ----------
49
50 Embeddings
51 ~~~~~~~~~~
52
53 .. autosummary::
54 :toctree: .
55
56 tl.phate
57 tl.palantir
58
59 Clustering and trajectory inference
60 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
62 .. autosummary::
63 :toctree: .
64
65 tl.phenograph
66
67 Gene scores, Cell cycle
68 ~~~~~~~~~~~~~~~~~~~~~~~
69
70 .. autosummary::
71 :toctree: .
72
73 tl.sandbag
74 tl.cyclone
75
76
77 Plotting: PL
78 ------------
79
80 .. autosummary::
81 :toctree: .
82
83 pl.phate
84 tl.palantir
85
86
87 Exporting
88 ---------
89
90 .. autosummary::
91 :toctree: .
92
93 exporting.spring_project
94 exporting.cellbrowser
95 """
96
[end of scanpy/external/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py
--- a/scanpy/external/__init__.py
+++ b/scanpy/external/__init__.py
@@ -19,6 +19,8 @@
import scanpy.external as sce
+If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!
+
Preprocessing: PP
------------------
| {"golden_diff": "diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py\n--- a/scanpy/external/__init__.py\n+++ b/scanpy/external/__init__.py\n@@ -19,6 +19,8 @@\n \n import scanpy.external as sce\n \n+If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n+\n Preprocessing: PP\n ------------------\n", "issue": "Give `external` higher billing in the docs?\nAt the moment external modules are kind of hidden in the docs. I think it'd be worth making them more visible (at least on the same page as everything else). I've been giving this a shot, but have hit the limit of my sphinx/ rst abilities.\r\n\r\nTwo ideas for how they could be more discoverable:\r\n\r\n* They get their own heading under `api`\r\n* They're mixed in with everything else (so everything stays organized by topic), but their names are prepended with `sce` while scanpy functions are prepended with `sc`.\n", "before_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\n\nfrom .. import _exporting as exporting\n\nimport sys\nfrom .. import utils\nutils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nPreprocessing: PP\n------------------\n\nBatch effect correction\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.mnn_correct\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n tl.palantir\n\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}]} | 1,221 | 110 |
gh_patches_debug_4507 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linking accounts with an OpenStreetMap account does not work if confirmation is required.
Log in with OpenStreetMap account and log out.
Log in with GitHub account and link it with the previous OpenStreetMap account.
This worked before #1857, but not after commit f963d20321e368de89f892b33ea4bce829ebc59d:
```
Internal server error, program!
Traceback (most recent call last):
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py", line 288, in run
new_state = function(**deps.as_kwargs)
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py", line 88, in get_response_for_resource
return {'response': resource.respond(request)}
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py", line 52, in respond
exec self.pages[1] in context
File "/home/sim6/www.gittip.com/www/on/openstreetmap/associate.spt", line 97, in
raise request.resource.respond(request)
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py", line 52, in respond
exec self.pages[1] in context
File "/home/sim6/www.gittip.com/www/on/confirm.html.spt", line 45, in
username = account.get_user_name()
AttributeError: 'OpenStreetMapAccount' object has no attribute 'get_user_name'
```
</issue>
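For orientation only, with simplified stand-ins rather than Gittip's real classes: the traceback boils down to the confirm page calling `get_user_name()` on whatever elsewhere-account object it receives, so any platform class without that method fails the same way.
```
class AccountElsewhere:
    def __init__(self, user_info):
        self.user_info = user_info


class OpenStreetMapAccount(AccountElsewhere):
    platform = "openstreetmap"
    # Unlike the other platform classes, this one defines no get_user_name().


account = OpenStreetMapAccount({"username": "alice"})
try:
    account.get_user_name()  # what confirm.html.spt does for every linked account
except AttributeError as exc:
    print(exc)  # 'OpenStreetMapAccount' object has no attribute 'get_user_name'
```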
<code>
[start of gittip/elsewhere/openstreetmap.py]
1 import logging
2
3 import gittip
4 import requests
5 from aspen import json, log, Response
6 from aspen.http.request import PathPart
7 from aspen.utils import typecheck
8 from gittip.elsewhere import AccountElsewhere
9
10
11
12 class OpenStreetMapAccount(AccountElsewhere):
13 platform = u'openstreetmap'
14
15 def get_url(self):
16 return self.user_info['html_url']
17
18
19 def oauth_url(website, action, then=""):
20 """Return a URL to start oauth dancing with OpenStreetMap.
21
22 For GitHub we can pass action and then through a querystring. For OpenStreetMap
23 we can't, so we send people through a local URL first where we stash this
24 info in an in-memory cache (eep! needs refactoring to scale).
25
26 Not sure why website is here. Vestige from GitHub forebear?
27
28 """
29 then = then.encode('base64').strip()
30 return "/on/openstreetmap/redirect?action=%s&then=%s" % (action, then)
31
32
33 def get_user_info(db, username, osm_api_url):
34 """Get the given user's information from the DB or failing that, openstreetmap.
35
36 :param username:
37 A unicode string representing a username in OpenStreetMap.
38
39 :param osm_api_url:
40 URL of OpenStreetMap API.
41
42 :returns:
43 A dictionary containing OpenStreetMap specific information for the user.
44 """
45 typecheck(username, (unicode, PathPart))
46 rec = db.one("""
47 SELECT user_info FROM elsewhere
48 WHERE platform='openstreetmap'
49 AND user_info->'username' = %s
50 """, (username,))
51 if rec is not None:
52 user_info = rec
53 else:
54 osm_user = requests.get("%s/user/%s" % (osm_api_url, username))
55 if osm_user.status_code == 200:
56 log("User %s found in OpenStreetMap but not in gittip." % username)
57 user_info = None
58 elif osm_user.status_code == 404:
59 raise Response(404,
60 "OpenStreetMap identity '{0}' not found.".format(username))
61 else:
62 log("OpenStreetMap api responded with {0}: {1}".format(status, content),
63 level=logging.WARNING)
64 raise Response(502, "OpenStreetMap lookup failed with %d." % status)
65
66 return user_info
67
[end of gittip/elsewhere/openstreetmap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gittip/elsewhere/openstreetmap.py b/gittip/elsewhere/openstreetmap.py
--- a/gittip/elsewhere/openstreetmap.py
+++ b/gittip/elsewhere/openstreetmap.py
@@ -15,6 +15,12 @@
def get_url(self):
return self.user_info['html_url']
+ def get_user_name(self):
+ return self.user_info['username']
+
+ def get_platform_icon(self):
+ return "/assets/icons/openstreetmap.12.png"
+
def oauth_url(website, action, then=""):
"""Return a URL to start oauth dancing with OpenStreetMap.
| {"golden_diff": "diff --git a/gittip/elsewhere/openstreetmap.py b/gittip/elsewhere/openstreetmap.py\n--- a/gittip/elsewhere/openstreetmap.py\n+++ b/gittip/elsewhere/openstreetmap.py\n@@ -15,6 +15,12 @@\n def get_url(self):\n return self.user_info['html_url']\n \n+ def get_user_name(self):\n+ return self.user_info['username']\n+\n+ def get_platform_icon(self):\n+ return \"/assets/icons/openstreetmap.12.png\"\n+\n \n def oauth_url(website, action, then=\"\"):\n \"\"\"Return a URL to start oauth dancing with OpenStreetMap.\n", "issue": "Linking accounts with an OpenStreetMap account does not work if confirmation is required.\nLog in with OpenStreetMap account and log out.\nLog in with GitHub account and link it with the previous OpenStreetMap account.\n\nBefore #1857 works, but not after commit f963d20321e368de89f892b33ea4bce829ebc59d\n\n```\nInternal server error, program!\n\nTraceback (most recent call last):\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py\", line 288, in run\n new_state = function(**deps.as_kwargs)\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py\", line 88, in get_response_for_resource\n return {'response': resource.respond(request)}\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py\", line 52, in respond\n exec self.pages[1] in context\n File \"/home/sim6/www.gittip.com/www/on/openstreetmap/associate.spt\", line 97, in \n raise request.resource.respond(request)\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py\", line 52, in respond\n exec self.pages[1] in context\n File \"/home/sim6/www.gittip.com/www/on/confirm.html.spt\", line 45, in \n username = account.get_user_name()\nAttributeError: 'OpenStreetMapAccount' object has no attribute 'get_user_name'\n```\n\n", "before_files": [{"content": "import logging\n\nimport gittip\nimport requests\nfrom aspen import json, log, Response\nfrom aspen.http.request import PathPart\nfrom aspen.utils import typecheck\nfrom gittip.elsewhere import AccountElsewhere\n\n\n\nclass OpenStreetMapAccount(AccountElsewhere):\n platform = u'openstreetmap'\n\n def get_url(self):\n return self.user_info['html_url']\n\n\ndef oauth_url(website, action, then=\"\"):\n \"\"\"Return a URL to start oauth dancing with OpenStreetMap.\n\n For GitHub we can pass action and then through a querystring. For OpenStreetMap\n we can't, so we send people through a local URL first where we stash this\n info in an in-memory cache (eep! needs refactoring to scale).\n\n Not sure why website is here. 
Vestige from GitHub forebear?\n\n \"\"\"\n then = then.encode('base64').strip()\n return \"/on/openstreetmap/redirect?action=%s&then=%s\" % (action, then)\n\n\ndef get_user_info(db, username, osm_api_url):\n \"\"\"Get the given user's information from the DB or failing that, openstreetmap.\n\n :param username:\n A unicode string representing a username in OpenStreetMap.\n\n :param osm_api_url:\n\tURL of OpenStreetMap API.\n\n :returns:\n A dictionary containing OpenStreetMap specific information for the user.\n \"\"\"\n typecheck(username, (unicode, PathPart))\n rec = db.one(\"\"\"\n SELECT user_info FROM elsewhere\n WHERE platform='openstreetmap'\n AND user_info->'username' = %s\n \"\"\", (username,))\n if rec is not None:\n user_info = rec\n else:\n osm_user = requests.get(\"%s/user/%s\" % (osm_api_url, username))\n if osm_user.status_code == 200:\n log(\"User %s found in OpenStreetMap but not in gittip.\" % username)\n user_info = None\n elif osm_user.status_code == 404:\n raise Response(404,\n \"OpenStreetMap identity '{0}' not found.\".format(username))\n else:\n log(\"OpenStreetMap api responded with {0}: {1}\".format(status, content),\n level=logging.WARNING)\n raise Response(502, \"OpenStreetMap lookup failed with %d.\" % status)\n\n return user_info\n", "path": "gittip/elsewhere/openstreetmap.py"}]} | 1,591 | 149 |
gh_patches_debug_14384 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to search for certain information beyond the first page
**Scenario 1:**
- Go to the search page: `http://zestedesavoir.com/rechercher/`
- Type `&ab` into the search box
- Note that the results span several pages
- Click "next"
- **Poof: a 404 error**
**Scenario 2:**
- Go to the search page: `http://zestedesavoir.com/rechercher/`
- Type `#1` into the search box
- Note that the results span several pages
- Click "next"
- **Poof: nothing but emptiness**
</issue>
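A hedged illustration, not taken from the report, of why `&ab` and `#1` break the pagination links: if the "next" URL re-appends the raw search term, `&` starts a new query parameter and `#` starts a fragment, so the term is lost unless it is percent-encoded first.
```
from urllib.parse import parse_qs, quote, urlsplit

for term in ("&ab", "#1"):
    raw = "/rechercher/?q=%s&page=2" % term                    # what a naive template emits
    safe = "/rechercher/?q=%s&page=2" % quote(term, safe="")   # percent-encoded value

    print(parse_qs(urlsplit(raw).query).get("q"))   # None -> the term was mangled
    print(parse_qs(urlsplit(safe).query).get("q"))  # ['&ab'] / ['#1'] -> preserved
```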
<code>
[start of zds/utils/templatetags/append_to_get.py]
1 # -*- coding: utf-8 -*-
2
3 from django import template
4 from functools import wraps
5
6 register = template.Library()
7
8 """
9 Decorator to facilitate template tag creation.
10 """
11
12
13 def easy_tag(func):
14 """
15 Deal with the repetitive parts of parsing template tags :
16
17 - Wraps functions attributes;
18 - Raise `TemplateSyntaxError` if arguments are not well formatted.
19
20 :rtype: function
21 :param func: Function to wraps.
22 :type func: function
23 """
24
25 @wraps(func)
26 def inner(_, token):
27 split_arg = token.split_contents()
28 try:
29 return func(*split_arg)
30 except TypeError:
31 import inspect
32 args = inspect.getargspec(func).args[1:]
33
34 err_msg = 'Bad arguments for tag "{0}".\nThe tag "{0}" take {1} arguments ({2}).\n {3} were provided ({4}).'
35 fstring = err_msg.format(split_arg[0],
36 len(args),
37 ", ".join(args),
38 len(split_arg),
39 ", ".join(split_arg))
40 raise template.TemplateSyntaxError(fstring)
41 return inner
42
43
44 class AppendGetNode(template.Node):
45 """
46 Template node allowing to render an URL appending argument to current GET address.
47
48 Parse a string like `key1=var1,key2=var2` and generate a new URL with the provided parameters appended to current
49 parameters.
50 """
51
52 def __init__(self, arg_list):
53 """
54 Create a template node which append `arg_list` to GET URL.
55
56 :param str arg_list: the argument list to append.
57 """
58
59 self.__dict_pairs = {}
60 for pair in arg_list.split(','):
61 if pair:
62 try:
63 key, val = pair.split('=')
64 if not val:
65 raise template.TemplateSyntaxError(
66 "Bad argument format. Empty value for key '{}".format(key))
67 self.__dict_pairs[key] = template.Variable(val)
68 except ValueError:
69 raise template.TemplateSyntaxError(
70 "Bad argument format.\n'{}' must use the format 'key1=var1,key2=var2'".format(arg_list))
71
72 def render(self, context):
73 """
74 Render the new URL according to the current context.
75
76 :param context: Current context.
77 :return: New URL with arguments appended.
78 :rtype: str
79 """
80 get = context['request'].GET.copy()
81 path = context['request'].META['PATH_INFO']
82
83 for key in self.__dict_pairs:
84 get[key] = self.__dict_pairs[key].resolve(context)
85
86 if len(get) > 0:
87 list_arg = [u"{0}={1}".format(key, value) for key in get.keys() for value in get.getlist(key)]
88 path += u"?" + u"&".join(list_arg)
89
90 return path
91
92
93 @register.tag()
94 @easy_tag
95 def append_to_get(_, arg_list):
96 """Render an URL appending argument to current GET address.
97
98 :param _: Tag name (not used)
99 :param arg_list: Argument list like `key1=var1,key2=var2`
100 :return: Template node.
101 """
102 return AppendGetNode(arg_list)
103
[end of zds/utils/templatetags/append_to_get.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/templatetags/append_to_get.py b/zds/utils/templatetags/append_to_get.py
--- a/zds/utils/templatetags/append_to_get.py
+++ b/zds/utils/templatetags/append_to_get.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from django import template
+from django.utils.http import urlquote
from functools import wraps
register = template.Library()
@@ -84,7 +85,7 @@
get[key] = self.__dict_pairs[key].resolve(context)
if len(get) > 0:
- list_arg = [u"{0}={1}".format(key, value) for key in get.keys() for value in get.getlist(key)]
+ list_arg = [u"{0}={1}".format(key, urlquote(value)) for key in get.keys() for value in get.getlist(key)]
path += u"?" + u"&".join(list_arg)
return path
| {"golden_diff": "diff --git a/zds/utils/templatetags/append_to_get.py b/zds/utils/templatetags/append_to_get.py\n--- a/zds/utils/templatetags/append_to_get.py\n+++ b/zds/utils/templatetags/append_to_get.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from django import template\n+from django.utils.http import urlquote\n from functools import wraps\n \n register = template.Library()\n@@ -84,7 +85,7 @@\n get[key] = self.__dict_pairs[key].resolve(context)\n \n if len(get) > 0:\n- list_arg = [u\"{0}={1}\".format(key, value) for key in get.keys() for value in get.getlist(key)]\n+ list_arg = [u\"{0}={1}\".format(key, urlquote(value)) for key in get.keys() for value in get.getlist(key)]\n path += u\"?\" + u\"&\".join(list_arg)\n \n return path\n", "issue": "Impossible de rechercher certainnes informations au del\u00e0 de la premi\u00e8re page\n**Sc\u00e9nario 1:**\n- Aller sur la page de recherche : `http://zestedesavoir.com/rechercher/`\n- Saisir dans la zone de recherche : `&ab`\n- Constatez qu'il y'a des r\u00e9sultats sur plusieurs pages\n- Cliquer sur suivant\n- **Pouf : une erreur 404**\n\n**Sc\u00e9nario 2:**\n- Aller sur la page de recherche : `http://zestedesavoir.com/rechercher/`\n- Saisir dans la zone de recherche : `#1`\n- Constatez qu'il y'a des r\u00e9sultats sur plusieurs pages\n- Cliquer sur suivant\n- **Pouf : le vide s'empare de nous**\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom functools import wraps\n\nregister = template.Library()\n\n\"\"\"\nDecorator to facilitate template tag creation.\n\"\"\"\n\n\ndef easy_tag(func):\n \"\"\"\n Deal with the repetitive parts of parsing template tags :\n\n - Wraps functions attributes;\n - Raise `TemplateSyntaxError` if arguments are not well formatted.\n\n :rtype: function\n :param func: Function to wraps.\n :type func: function\n \"\"\"\n\n @wraps(func)\n def inner(_, token):\n split_arg = token.split_contents()\n try:\n return func(*split_arg)\n except TypeError:\n import inspect\n args = inspect.getargspec(func).args[1:]\n\n err_msg = 'Bad arguments for tag \"{0}\".\\nThe tag \"{0}\" take {1} arguments ({2}).\\n {3} were provided ({4}).'\n fstring = err_msg.format(split_arg[0],\n len(args),\n \", \".join(args),\n len(split_arg),\n \", \".join(split_arg))\n raise template.TemplateSyntaxError(fstring)\n return inner\n\n\nclass AppendGetNode(template.Node):\n \"\"\"\n Template node allowing to render an URL appending argument to current GET address.\n\n Parse a string like `key1=var1,key2=var2` and generate a new URL with the provided parameters appended to current\n parameters.\n \"\"\"\n\n def __init__(self, arg_list):\n \"\"\"\n Create a template node which append `arg_list` to GET URL.\n\n :param str arg_list: the argument list to append.\n \"\"\"\n\n self.__dict_pairs = {}\n for pair in arg_list.split(','):\n if pair:\n try:\n key, val = pair.split('=')\n if not val:\n raise template.TemplateSyntaxError(\n \"Bad argument format. 
Empty value for key '{}\".format(key))\n self.__dict_pairs[key] = template.Variable(val)\n except ValueError:\n raise template.TemplateSyntaxError(\n \"Bad argument format.\\n'{}' must use the format 'key1=var1,key2=var2'\".format(arg_list))\n\n def render(self, context):\n \"\"\"\n Render the new URL according to the current context.\n\n :param context: Current context.\n :return: New URL with arguments appended.\n :rtype: str\n \"\"\"\n get = context['request'].GET.copy()\n path = context['request'].META['PATH_INFO']\n\n for key in self.__dict_pairs:\n get[key] = self.__dict_pairs[key].resolve(context)\n\n if len(get) > 0:\n list_arg = [u\"{0}={1}\".format(key, value) for key in get.keys() for value in get.getlist(key)]\n path += u\"?\" + u\"&\".join(list_arg)\n\n return path\n\n\[email protected]()\n@easy_tag\ndef append_to_get(_, arg_list):\n \"\"\"Render an URL appending argument to current GET address.\n\n :param _: Tag name (not used)\n :param arg_list: Argument list like `key1=var1,key2=var2`\n :return: Template node.\n \"\"\"\n return AppendGetNode(arg_list)\n", "path": "zds/utils/templatetags/append_to_get.py"}]} | 1,623 | 229 |
gh_patches_debug_30253 | rasdani/github-patches | git_diff | streamlink__streamlink-1663 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to validate result: <_sre.SRE_Match object; ... should be 'list' but is 'str'
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Hi, something's failing when trying to fetch a video from INE on macOS 10.12.6:
```
$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup 720p --loglevel debug --http-cookie laravel_session=<removed>
[cli][debug] OS: macOS 10.12.6
[cli][debug] Python: 3.5.5
[cli][debug] Streamlink: 0.12.1
[cli][debug] Requests(2.18.1), Socks(1.6.7), Websocket(0.47.0)
[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup
[plugin.ine][debug] Found video ID: 97c49b6f-5cda-4e66-859d-627ba2e9e26e
[plugin.ine][debug] Loading player JS: https://content.jwplatform.com/players/gdH3hfpy-p4NBeNN0.js?exp=1527771621&sig=<removed>
error: Unable to validate result: <_sre.SRE_Match object; span=(100223, 101420), match='jwConfig = {\n "aspectratio": "16:9",\n "autost> does not equal None or Unable to validate key 'playlist': Type of '//content.jwplatform.com/v2/media/gdH3hfpy?token=<removed>' should be 'list' but is 'str'
$
$ python --version
Python 3.5.5
$ streamlink --version-check
[cli][info] Your Streamlink version (0.12.1) is up to date!
$
```
Any ideas? Thanks!
</issue>
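Not from the report itself: a toy sketch of what the error message suggests, namely that newer jwplayer configs carry `playlist` as a URL string pointing at a second JSON media document rather than as an inline list, so a schema that only accepts the list shape rejects the string shape. All names here are illustrative.
```
inline_cfg = {"playlist": [{"sources": [{"file": "//cdn/video.m3u8", "type": "hls"}]}]}
url_cfg = {"playlist": "//content.jwplatform.com/v2/media/gdH3hfpy?token=..."}


def playlist_sources(config, fetch_json):
    playlist = config["playlist"]
    if isinstance(playlist, str):
        # String form: the playlist itself must be fetched from the media API.
        playlist = fetch_json("https:" + playlist)["playlist"]
    return playlist[0]["sources"]


print(playlist_sources(inline_cfg, fetch_json=None))  # works without any request
```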
<code>
[start of src/streamlink/plugins/ine.py]
1 from __future__ import print_function
2
3 import json
4 import re
5
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import http
8 from streamlink.plugin.api import validate
9 from streamlink.stream import HLSStream
10
11
12 class INE(Plugin):
13 url_re = re.compile(r"""https://streaming.ine.com/play\#?/
14 ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?
15 (.*?)""", re.VERBOSE)
16 play_url = "https://streaming.ine.com/play/{vid}/watch"
17 js_re = re.compile(r'''script type="text/javascript" src="(https://content.jwplatform.com/players/.*?)"''')
18 jwplayer_re = re.compile(r'''jwConfig\s*=\s*(\{.*\});''', re.DOTALL)
19 setup_schema = validate.Schema(
20 validate.transform(jwplayer_re.search),
21 validate.any(
22 None,
23 validate.all(
24 validate.get(1),
25 validate.transform(json.loads),
26 {"playlist": [
27 {"sources": [{"file": validate.text,
28 "type": validate.text}]}
29 ]}
30 )
31 )
32 )
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return cls.url_re.match(url) is not None
37
38 def _get_streams(self):
39 vid = self.url_re.match(self.url).group(1)
40 self.logger.debug("Found video ID: {0}", vid)
41
42 page = http.get(self.play_url.format(vid=vid))
43 js_url_m = self.js_re.search(page.text)
44 if js_url_m:
45 js_url = js_url_m.group(1)
46 self.logger.debug("Loading player JS: {0}", js_url)
47
48 res = http.get(js_url)
49 data = self.setup_schema.validate(res.text)
50 for source in data["playlist"][0]["sources"]:
51 if source["type"] == "hls":
52 return HLSStream.parse_variant_playlist(self.session, "https:" + source["file"])
53
54
55 __plugin__ = INE
56
[end of src/streamlink/plugins/ine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py
--- a/src/streamlink/plugins/ine.py
+++ b/src/streamlink/plugins/ine.py
@@ -6,7 +6,8 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
-from streamlink.stream import HLSStream
+from streamlink.stream import HLSStream, HTTPStream
+from streamlink.utils import update_scheme
class INE(Plugin):
@@ -23,10 +24,8 @@
validate.all(
validate.get(1),
validate.transform(json.loads),
- {"playlist": [
- {"sources": [{"file": validate.text,
- "type": validate.text}]}
- ]}
+ {"playlist": str},
+ validate.get("playlist")
)
)
)
@@ -46,10 +45,15 @@
self.logger.debug("Loading player JS: {0}", js_url)
res = http.get(js_url)
- data = self.setup_schema.validate(res.text)
+ metadata_url = update_scheme(self.url, self.setup_schema.validate(res.text))
+ data = http.json(http.get(metadata_url))
+
for source in data["playlist"][0]["sources"]:
- if source["type"] == "hls":
- return HLSStream.parse_variant_playlist(self.session, "https:" + source["file"])
+ if source["type"] == "application/vnd.apple.mpegurl":
+ for s in HLSStream.parse_variant_playlist(self.session, source["file"]).items():
+ yield s
+ elif source["type"] == "video/mp4":
+ yield "{0}p".format(source["height"]), HTTPStream(self.session, source["file"])
__plugin__ = INE
| {"golden_diff": "diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py\n--- a/src/streamlink/plugins/ine.py\n+++ b/src/streamlink/plugins/ine.py\n@@ -6,7 +6,8 @@\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n from streamlink.plugin.api import validate\n-from streamlink.stream import HLSStream\n+from streamlink.stream import HLSStream, HTTPStream\n+from streamlink.utils import update_scheme\n \n \n class INE(Plugin):\n@@ -23,10 +24,8 @@\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n- {\"playlist\": [\n- {\"sources\": [{\"file\": validate.text,\n- \"type\": validate.text}]}\n- ]}\n+ {\"playlist\": str},\n+ validate.get(\"playlist\")\n )\n )\n )\n@@ -46,10 +45,15 @@\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n \n res = http.get(js_url)\n- data = self.setup_schema.validate(res.text)\n+ metadata_url = update_scheme(self.url, self.setup_schema.validate(res.text))\n+ data = http.json(http.get(metadata_url))\n+\n for source in data[\"playlist\"][0][\"sources\"]:\n- if source[\"type\"] == \"hls\":\n- return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n+ if source[\"type\"] == \"application/vnd.apple.mpegurl\":\n+ for s in HLSStream.parse_variant_playlist(self.session, source[\"file\"]).items():\n+ yield s\n+ elif source[\"type\"] == \"video/mp4\":\n+ yield \"{0}p\".format(source[\"height\"]), HTTPStream(self.session, source[\"file\"])\n \n \n __plugin__ = INE\n", "issue": "Unable to validate result: <_sre.SRE_Match object; ... should be 'list' but is 'str'\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nHi, something's failing when trying to fetch a video from INE, mac OS 10.12.6:\r\n\r\n```\r\n$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup 720p --loglevel debug --http-cookie laravel_session=<removed>\r\n[cli][debug] OS: macOS 10.12.6\r\n[cli][debug] Python: 3.5.5\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.1), Socks(1.6.7), Websocket(0.47.0)\r\n[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup\r\n[plugin.ine][debug] Found video ID: 97c49b6f-5cda-4e66-859d-627ba2e9e26e\r\n[plugin.ine][debug] Loading player JS: https://content.jwplatform.com/players/gdH3hfpy-p4NBeNN0.js?exp=1527771621&sig=<removed>\r\nerror: Unable to validate result: <_sre.SRE_Match object; span=(100223, 101420), match='jwConfig = {\\n \"aspectratio\": \"16:9\",\\n \"autost> does not equal None or Unable to validate key 'playlist': Type of '//content.jwplatform.com/v2/media/gdH3hfpy?token=<removed>' should be 'list' but is 'str'\r\n$ \r\n$ python --version\r\nPython 3.5.5\r\n$ streamlink --version-check\r\n[cli][info] Your Streamlink version (0.12.1) is up to date!\r\n$ \r\n```\r\nAny ideas? 
Thanks!\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass INE(Plugin):\n url_re = re.compile(r\"\"\"https://streaming.ine.com/play\\#?/\n ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n jwplayer_re = re.compile(r'''jwConfig\\s*=\\s*(\\{.*\\});''', re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n {\"playlist\": [\n {\"sources\": [{\"file\": validate.text,\n \"type\": validate.text}]}\n ]}\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n vid = self.url_re.match(self.url).group(1)\n self.logger.debug(\"Found video ID: {0}\", vid)\n\n page = http.get(self.play_url.format(vid=vid))\n js_url_m = self.js_re.search(page.text)\n if js_url_m:\n js_url = js_url_m.group(1)\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n\n res = http.get(js_url)\n data = self.setup_schema.validate(res.text)\n for source in data[\"playlist\"][0][\"sources\"]:\n if source[\"type\"] == \"hls\":\n return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n\n\n__plugin__ = INE\n", "path": "src/streamlink/plugins/ine.py"}]} | 1,688 | 392 |
gh_patches_debug_21861 | rasdani/github-patches | git_diff | facebookresearch__hydra-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] system.exit(code) is not respected in Hydra
</issue>
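The one-line report leaves the mechanism implicit, so here is a small stand-alone reproduction (not Hydra's code): catching `SystemExit` around the task function turns `sys.exit(code)` into a no-op, and the process then ends with status 0 instead of the requested code.
```
import sys


def task():
    sys.exit(3)  # the task asks for exit code 3


def wrapper(task_function):
    try:
        task_function()
    except SystemExit:
        pass  # swallows the exception, so the requested exit code is lost


wrapper(task)
print("still running; the interpreter will now exit with status 0, not 3")
```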
<code>
[start of hydra/main.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import functools
3 import sys
4 from typing import Callable, Optional
5
6 from ._internal.utils import get_args_parser, run_hydra
7 from .types import TaskFunction
8
9 # TODO: change config_path to Optional[str]
10
11
12 def main(
13 config_path: str = "", strict: Optional[bool] = None
14 ) -> Callable[[TaskFunction], Callable[[], None]]:
15 """
16 :param config_path: the config path, can be a directory in which it's used as the config root
17 or a file to load
18 :param strict: strict mode, will throw an error if command line overrides are not changing an
19 existing key or
20 if the code is accessing a non existent key
21 """
22
23 def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
24 @functools.wraps(task_function)
25 def decorated_main() -> None:
26 try:
27 run_hydra(
28 args_parser=get_args_parser(),
29 task_function=task_function,
30 config_path=config_path,
31 strict=strict,
32 )
33 except KeyboardInterrupt:
34 sys.exit(-1)
35 except SystemExit:
36 pass
37
38 return decorated_main
39
40 return main_decorator
41
[end of hydra/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/main.py b/hydra/main.py
--- a/hydra/main.py
+++ b/hydra/main.py
@@ -1,6 +1,5 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
-import sys
from typing import Callable, Optional
from ._internal.utils import get_args_parser, run_hydra
@@ -23,17 +22,12 @@
def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def decorated_main() -> None:
- try:
- run_hydra(
- args_parser=get_args_parser(),
- task_function=task_function,
- config_path=config_path,
- strict=strict,
- )
- except KeyboardInterrupt:
- sys.exit(-1)
- except SystemExit:
- pass
+ run_hydra(
+ args_parser=get_args_parser(),
+ task_function=task_function,
+ config_path=config_path,
+ strict=strict,
+ )
return decorated_main
| {"golden_diff": "diff --git a/hydra/main.py b/hydra/main.py\n--- a/hydra/main.py\n+++ b/hydra/main.py\n@@ -1,6 +1,5 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import functools\n-import sys\n from typing import Callable, Optional\n \n from ._internal.utils import get_args_parser, run_hydra\n@@ -23,17 +22,12 @@\n def main_decorator(task_function: TaskFunction) -> Callable[[], None]:\n @functools.wraps(task_function)\n def decorated_main() -> None:\n- try:\n- run_hydra(\n- args_parser=get_args_parser(),\n- task_function=task_function,\n- config_path=config_path,\n- strict=strict,\n- )\n- except KeyboardInterrupt:\n- sys.exit(-1)\n- except SystemExit:\n- pass\n+ run_hydra(\n+ args_parser=get_args_parser(),\n+ task_function=task_function,\n+ config_path=config_path,\n+ strict=strict,\n+ )\n \n return decorated_main\n", "issue": "[Bug] system.exit(code) is not respected in Hydra\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport functools\nimport sys\nfrom typing import Callable, Optional\n\nfrom ._internal.utils import get_args_parser, run_hydra\nfrom .types import TaskFunction\n\n# TODO: change config_path to Optional[str]\n\n\ndef main(\n config_path: str = \"\", strict: Optional[bool] = None\n) -> Callable[[TaskFunction], Callable[[], None]]:\n \"\"\"\n :param config_path: the config path, can be a directory in which it's used as the config root\n or a file to load\n :param strict: strict mode, will throw an error if command line overrides are not changing an\n existing key or\n if the code is accessing a non existent key\n \"\"\"\n\n def main_decorator(task_function: TaskFunction) -> Callable[[], None]:\n @functools.wraps(task_function)\n def decorated_main() -> None:\n try:\n run_hydra(\n args_parser=get_args_parser(),\n task_function=task_function,\n config_path=config_path,\n strict=strict,\n )\n except KeyboardInterrupt:\n sys.exit(-1)\n except SystemExit:\n pass\n\n return decorated_main\n\n return main_decorator\n", "path": "hydra/main.py"}]} | 890 | 245 |
gh_patches_debug_9471 | rasdani/github-patches | git_diff | vispy__vispy-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IPython WebGL Examples not working.
The IPython notebook examples are not working with the latest IPython(Jupyter) 4.0 release.
</issue>
<code>
[start of vispy/app/backends/ipython/_widget.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, 2015, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 try:
6 from IPython.html.widgets import DOMWidget
7 from IPython.utils.traitlets import Unicode, Int, Bool
8 except Exception as exp:
9 # Init dummy objects needed to import this module withour errors.
10 # These are all overwritten with imports from IPython (on success)
11 DOMWidget = object
12 Unicode = Int = Float = Bool = lambda *args, **kwargs: None
13 available, testable, why_not, which = False, False, str(exp), None
14 else:
15 available, testable, why_not, which = True, False, None, None
16 from vispy.app.backends._ipynb_util import create_glir_message
17 from vispy.app import Timer
18
19
20 # ---------------------------------------------------------- IPython Widget ---
21 def _stop_timers(canvas):
22 """Stop all timers in a canvas."""
23 for attr in dir(canvas):
24 try:
25 attr_obj = getattr(canvas, attr)
26 except NotImplementedError:
27 # This try/except is needed because canvas.position raises
28 # an error (it is not implemented in this backend).
29 attr_obj = None
30 if isinstance(attr_obj, Timer):
31 attr_obj.stop()
32
33
34 class VispyWidget(DOMWidget):
35 _view_name = Unicode("VispyView", sync=True)
36 _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)
37
38 #height/width of the widget is managed by IPython.
39 #it's a string and can be anything valid in CSS.
40 #here we only manage the size of the viewport.
41 width = Int(sync=True)
42 height = Int(sync=True)
43 resizable = Bool(value=True, sync=True)
44
45 def __init__(self, **kwargs):
46 super(VispyWidget, self).__init__(**kwargs)
47 self.on_msg(self.events_received)
48 self.canvas = None
49 self.canvas_backend = None
50 self.gen_event = None
51
52 def set_canvas(self, canvas):
53 self.width, self.height = canvas._backend._default_size
54 self.canvas = canvas
55 self.canvas_backend = self.canvas._backend
56 self.canvas_backend.set_widget(self)
57 self.gen_event = self.canvas_backend._gen_event
58 #setup the backend widget then.
59
60 def events_received(self, _, msg):
61 if msg['msg_type'] == 'init':
62 self.canvas_backend._reinit_widget()
63 elif msg['msg_type'] == 'events':
64 events = msg['contents']
65 for ev in events:
66 self.gen_event(ev)
67 elif msg['msg_type'] == 'status':
68 if msg['contents'] == 'removed':
69 # Stop all timers associated to the widget.
70 _stop_timers(self.canvas_backend._vispy_canvas)
71
72 def send_glir_commands(self, commands):
73 # TODO: check whether binary websocket is available (ipython >= 3)
74 # Until IPython 3.0 is released, use base64.
75 array_serialization = 'base64'
76 # array_serialization = 'binary'
77 if array_serialization == 'base64':
78 msg = create_glir_message(commands, 'base64')
79 msg['array_serialization'] = 'base64'
80 self.send(msg)
81 elif array_serialization == 'binary':
82 msg = create_glir_message(commands, 'binary')
83 msg['array_serialization'] = 'binary'
84 # Remove the buffers from the JSON message: they will be sent
85 # independently via binary WebSocket.
86 buffers = msg.pop('buffers')
87 self.comm.send({"method": "custom", "content": msg},
88 buffers=buffers)
89
[end of vispy/app/backends/ipython/_widget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/app/backends/ipython/_widget.py b/vispy/app/backends/ipython/_widget.py
--- a/vispy/app/backends/ipython/_widget.py
+++ b/vispy/app/backends/ipython/_widget.py
@@ -57,7 +57,10 @@
self.gen_event = self.canvas_backend._gen_event
#setup the backend widget then.
- def events_received(self, _, msg):
+ # In IPython < 4, these callbacks are given two arguments; in
+ # IPython/jupyter 4, they take 3. events_received is variadic to
+ # accommodate both cases.
+ def events_received(self, _, msg, *args):
if msg['msg_type'] == 'init':
self.canvas_backend._reinit_widget()
elif msg['msg_type'] == 'events':
| {"golden_diff": "diff --git a/vispy/app/backends/ipython/_widget.py b/vispy/app/backends/ipython/_widget.py\n--- a/vispy/app/backends/ipython/_widget.py\n+++ b/vispy/app/backends/ipython/_widget.py\n@@ -57,7 +57,10 @@\n self.gen_event = self.canvas_backend._gen_event\n #setup the backend widget then.\n \n- def events_received(self, _, msg):\n+ # In IPython < 4, these callbacks are given two arguments; in\n+ # IPython/jupyter 4, they take 3. events_received is variadic to\n+ # accommodate both cases.\n+ def events_received(self, _, msg, *args):\n if msg['msg_type'] == 'init':\n self.canvas_backend._reinit_widget()\n elif msg['msg_type'] == 'events':\n", "issue": "IPython WebGL Examples not working.\nThe IPython notebook examples are not working with the latest IPython(Jupyter) 4.0 release.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\ntry:\n from IPython.html.widgets import DOMWidget\n from IPython.utils.traitlets import Unicode, Int, Bool\nexcept Exception as exp:\n # Init dummy objects needed to import this module withour errors.\n # These are all overwritten with imports from IPython (on success)\n DOMWidget = object\n Unicode = Int = Float = Bool = lambda *args, **kwargs: None\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n available, testable, why_not, which = True, False, None, None\nfrom vispy.app.backends._ipynb_util import create_glir_message\nfrom vispy.app import Timer\n\n\n# ---------------------------------------------------------- IPython Widget ---\ndef _stop_timers(canvas):\n \"\"\"Stop all timers in a canvas.\"\"\"\n for attr in dir(canvas):\n try:\n attr_obj = getattr(canvas, attr)\n except NotImplementedError:\n # This try/except is needed because canvas.position raises\n # an error (it is not implemented in this backend).\n attr_obj = None\n if isinstance(attr_obj, Timer):\n attr_obj.stop()\n\n\nclass VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)\n\n #height/width of the widget is managed by IPython.\n #it's a string and can be anything valid in CSS.\n #here we only manage the size of the viewport.\n width = Int(sync=True)\n height = Int(sync=True)\n resizable = Bool(value=True, sync=True)\n\n def __init__(self, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n self.on_msg(self.events_received)\n self.canvas = None\n self.canvas_backend = None\n self.gen_event = None\n\n def set_canvas(self, canvas):\n self.width, self.height = canvas._backend._default_size\n self.canvas = canvas\n self.canvas_backend = self.canvas._backend\n self.canvas_backend.set_widget(self)\n self.gen_event = self.canvas_backend._gen_event\n #setup the backend widget then.\n\n def events_received(self, _, msg):\n if msg['msg_type'] == 'init':\n self.canvas_backend._reinit_widget()\n elif msg['msg_type'] == 'events':\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n elif msg['msg_type'] == 'status':\n if msg['contents'] == 'removed':\n # Stop all timers associated to the widget.\n _stop_timers(self.canvas_backend._vispy_canvas)\n\n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n # Until IPython 3.0 is released, use base64.\n array_serialization = 'base64'\n # array_serialization = 'binary'\n if array_serialization == 'base64':\n msg = 
create_glir_message(commands, 'base64')\n msg['array_serialization'] = 'base64'\n self.send(msg)\n elif array_serialization == 'binary':\n msg = create_glir_message(commands, 'binary')\n msg['array_serialization'] = 'binary'\n # Remove the buffers from the JSON message: they will be sent\n # independently via binary WebSocket.\n buffers = msg.pop('buffers')\n self.comm.send({\"method\": \"custom\", \"content\": msg},\n buffers=buffers)\n", "path": "vispy/app/backends/ipython/_widget.py"}]} | 1,559 | 197 |
gh_patches_debug_29933 | rasdani/github-patches | git_diff | jazzband__pip-tools-1912 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The latest and the stable documentation may have been built using the development version
Shouldn't the docs [there](https://pip-tools.readthedocs.io/) show the latest released version?
<img width="788" alt="Screenshot 2023-04-07 at 01 17 17" src="https://user-images.githubusercontent.com/7377671/230510654-fd15e934-4243-4ee3-85c6-bb8d55e656d4.png">
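One likely cause is that the version banner is computed with `setuptools_scm` from the git checkout at docs-build time, which yields a development version. A hedged sketch of an alternative, assuming the docs build environment has `pip-tools` installed as a distribution so its released version is available from package metadata (the version string in the comment is only an example):

```python
# docs/conf.py (sketch, not the project's actual configuration)
from importlib.metadata import version as get_version

release = get_version("pip-tools")          # e.g. "6.13.0"
version = ".".join(release.split(".")[:3])  # short X.Y.Z form
```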
</issue>
<code>
[start of docs/conf.py]
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from functools import partial
7 from pathlib import Path
8
9 from setuptools_scm import get_version
10
11 # -- Path setup --------------------------------------------------------------
12
13 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
14 get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
15
16
17 # -- Project information -----------------------------------------------------
18
19 project = "pip-tools"
20 author = f"{project} Contributors"
21 copyright = f"The {author}"
22
23 # The short X.Y version
24 version = ".".join(
25 get_scm_version(
26 local_scheme="no-local-version",
27 ).split(
28 "."
29 )[:3],
30 )
31
32 # The full version, including alpha/beta/rc tags
33 release = get_scm_version()
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = ["myst_parser"]
42
43
44 # -- Options for HTML output -------------------------------------------------
45
46 # The theme to use for HTML and HTML Help pages. See the documentation for
47 # a list of builtin themes.
48 #
49 html_theme = "furo"
50
51
52 # -------------------------------------------------------------------------
53 default_role = "any"
54 nitpicky = True
55
56 linkcheck_ignore = [
57 r"^https://matrix\.to/#",
58 ]
59
60 suppress_warnings = ["myst.xref_missing"]
61
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,15 +3,17 @@
from __future__ import annotations
-from functools import partial
+from importlib.metadata import version as get_version
from pathlib import Path
-from setuptools_scm import get_version
+from sphinx.util import logging
+from sphinx.util.console import bold
+
+logger = logging.getLogger(__name__)
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
-get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# -- Project information -----------------------------------------------------
@@ -20,18 +22,14 @@
author = f"{project} Contributors"
copyright = f"The {author}"
-# The short X.Y version
-version = ".".join(
- get_scm_version(
- local_scheme="no-local-version",
- ).split(
- "."
- )[:3],
-)
-
# The full version, including alpha/beta/rc tags
-release = get_scm_version()
+release = get_version(project)
+
+# The short X.Y version
+version = ".".join(release.split(".")[:3])
+logger.info(bold("%s version: %s"), project, version)
+logger.info(bold("%s release: %s"), project, release)
# -- General configuration ---------------------------------------------------
@@ -47,6 +45,7 @@
# a list of builtin themes.
#
html_theme = "furo"
+html_title = f"<nobr>{project}</nobr> documentation v{release}"
# -------------------------------------------------------------------------
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -3,15 +3,17 @@\n \n from __future__ import annotations\n \n-from functools import partial\n+from importlib.metadata import version as get_version\n from pathlib import Path\n \n-from setuptools_scm import get_version\n+from sphinx.util import logging\n+from sphinx.util.console import bold\n+\n+logger = logging.getLogger(__name__)\n \n # -- Path setup --------------------------------------------------------------\n \n PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\n-get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n \n \n # -- Project information -----------------------------------------------------\n@@ -20,18 +22,14 @@\n author = f\"{project} Contributors\"\n copyright = f\"The {author}\"\n \n-# The short X.Y version\n-version = \".\".join(\n- get_scm_version(\n- local_scheme=\"no-local-version\",\n- ).split(\n- \".\"\n- )[:3],\n-)\n-\n # The full version, including alpha/beta/rc tags\n-release = get_scm_version()\n+release = get_version(project)\n+\n+# The short X.Y version\n+version = \".\".join(release.split(\".\")[:3])\n \n+logger.info(bold(\"%s version: %s\"), project, version)\n+logger.info(bold(\"%s release: %s\"), project, release)\n \n # -- General configuration ---------------------------------------------------\n \n@@ -47,6 +45,7 @@\n # a list of builtin themes.\n #\n html_theme = \"furo\"\n+html_title = f\"<nobr>{project}</nobr> documentation v{release}\"\n \n \n # -------------------------------------------------------------------------\n", "issue": "The latest and the stable documentation may have been built using the development version\nShouldn't be [there](https://pip-tools.readthedocs.io/) the latest released version?\r\n\r\n<img width=\"788\" alt=\"Screenshot 2023-04-07 at 01 17 17\" src=\"https://user-images.githubusercontent.com/7377671/230510654-fd15e934-4243-4ee3-85c6-bb8d55e656d4.png\">\r\n\r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n\nlinkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n]\n\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}]} | 1,098 | 359 |
gh_patches_debug_27526 | rasdani/github-patches | git_diff | pantsbuild__pants-11274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Target Hitting Recursion Limit During Pants Setup (with workaround)
# Description of Problem
We’re in the process of migrating from 1.25 to 2.1.0 and hit an issue trying to run a test on a specific target. The target is large and causes the maximum recursion limit to be exceeded.
I tried hacking on `sys.setrecursionlimit` and found that, for our use case, 1021 was the minimum value that would allow the test to succeed.
We can try breaking that target up, but the app it is testing is kind of a monolith, so I don’t know how successful that would be.
Could Pants expose a configurable recursion limit at runtime to handle this?
This error happens in the pants setup before our pytest is run.
# Workaround
In one of our plugins' `register.py` files we added `sys.setrecursionlimit(1021)`, and this resolved our problem; a sketch follows.
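A minimal sketch of that workaround (the plugin path and the value 1021 are specific to our repository — treat both as assumptions; a fix inside Pants itself would presumably make the limit configurable via an option or environment variable):

```python
# our_plugin/register.py (workaround sketch)
import sys

# 1021 was the smallest limit that let our large target's test run;
# the default of 1000 was exceeded during Pants' setup phase.
sys.setrecursionlimit(1021)
```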
</issue>
<code>
[start of src/python/pants/bin/pants_loader.py]
1 # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import importlib
5 import locale
6 import os
7 import warnings
8 from textwrap import dedent
9
10
11 class PantsLoader:
12 """Loads and executes entrypoints."""
13
14 ENTRYPOINT_ENV_VAR = "PANTS_ENTRYPOINT"
15 DEFAULT_ENTRYPOINT = "pants.bin.pants_exe:main"
16
17 ENCODING_IGNORE_ENV_VAR = "PANTS_IGNORE_UNRECOGNIZED_ENCODING"
18
19 class InvalidLocaleError(Exception):
20 """Raised when a valid locale can't be found."""
21
22 @staticmethod
23 def setup_warnings():
24 # We want to present warnings to the user, set this up before importing any of our own code,
25 # to ensure all deprecation warnings are seen, including module deprecations.
26 # The "default" action displays a warning for a particular file and line number exactly once.
27 # See https://docs.python.org/3/library/warnings.html#the-warnings-filter for the complete list.
28 #
29 # However, we do turn off deprecation warnings for libraries that Pants uses for which we do
30 # not have a fixed upstream version, typically because the library is no longer maintained.
31 warnings.simplefilter("default", category=DeprecationWarning)
32 # TODO: Eric-Arellano has emailed the author to see if he is willing to accept a PR fixing the
33 # deprecation warnings and to release the fix. If he says yes, remove this once fixed.
34 warnings.filterwarnings("ignore", category=DeprecationWarning, module="ansicolors")
35 # Silence this ubiquitous warning. Several of our 3rd party deps incur this.
36 warnings.filterwarnings(
37 "ignore",
38 category=DeprecationWarning,
39 message="Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated",
40 )
41
42 @classmethod
43 def ensure_locale(cls):
44 # Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.
45 # This check is done early to give good feedback to user on how to fix the problem. Other
46 # libraries called by Pants may fail with more obscure errors.
47 encoding = locale.getpreferredencoding()
48 if (
49 encoding.lower() != "utf-8"
50 and os.environ.get(cls.ENCODING_IGNORE_ENV_VAR, None) is None
51 ):
52 raise cls.InvalidLocaleError(
53 dedent(
54 """
55 Your system's preferred encoding is `{}`, but Pants requires `UTF-8`.
56 Specifically, Python's `locale.getpreferredencoding()` must resolve to `UTF-8`.
57
58 Fix it by setting the LC_* and LANG environment settings. Example:
59 LC_ALL=en_US.UTF-8
60 LANG=en_US.UTF-8
61 Or, bypass it by setting the below environment variable.
62 {}=1
63 Note: we cannot guarantee consistent behavior with this bypass enabled.
64 """.format(
65 encoding, cls.ENCODING_IGNORE_ENV_VAR
66 )
67 )
68 )
69
70 @staticmethod
71 def determine_entrypoint(env_var, default):
72 return os.environ.pop(env_var, default)
73
74 @staticmethod
75 def load_and_execute(entrypoint):
76 assert ":" in entrypoint, "ERROR: entrypoint must be of the form `module.path:callable`"
77 module_path, func_name = entrypoint.split(":", 1)
78 module = importlib.import_module(module_path)
79 entrypoint_main = getattr(module, func_name)
80 assert callable(entrypoint_main), "ERROR: entrypoint `{}` is not callable".format(
81 entrypoint
82 )
83 entrypoint_main()
84
85 @classmethod
86 def run(cls):
87 cls.setup_warnings()
88 cls.ensure_locale()
89 entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)
90 cls.load_and_execute(entrypoint)
91
92
93 def main():
94 PantsLoader.run()
95
96
97 if __name__ == "__main__":
98 main()
99
[end of src/python/pants/bin/pants_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/bin/pants_loader.py b/src/python/pants/bin/pants_loader.py
--- a/src/python/pants/bin/pants_loader.py
+++ b/src/python/pants/bin/pants_loader.py
@@ -4,6 +4,7 @@
import importlib
import locale
import os
+import sys
import warnings
from textwrap import dedent
@@ -14,6 +15,8 @@
ENTRYPOINT_ENV_VAR = "PANTS_ENTRYPOINT"
DEFAULT_ENTRYPOINT = "pants.bin.pants_exe:main"
+ RECURSION_LIMIT_ENV_VAR = "PANTS_RECURSION_LIMIT"
+
ENCODING_IGNORE_ENV_VAR = "PANTS_IGNORE_UNRECOGNIZED_ENCODING"
class InvalidLocaleError(Exception):
@@ -67,6 +70,10 @@
)
)
+ @classmethod
+ def set_recursion_limit(cls):
+ sys.setrecursionlimit(int(os.environ.get(cls.RECURSION_LIMIT_ENV_VAR, "10000")))
+
@staticmethod
def determine_entrypoint(env_var, default):
return os.environ.pop(env_var, default)
@@ -86,6 +93,7 @@
def run(cls):
cls.setup_warnings()
cls.ensure_locale()
+ cls.set_recursion_limit()
entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)
cls.load_and_execute(entrypoint)
| {"golden_diff": "diff --git a/src/python/pants/bin/pants_loader.py b/src/python/pants/bin/pants_loader.py\n--- a/src/python/pants/bin/pants_loader.py\n+++ b/src/python/pants/bin/pants_loader.py\n@@ -4,6 +4,7 @@\n import importlib\n import locale\n import os\n+import sys\n import warnings\n from textwrap import dedent\n \n@@ -14,6 +15,8 @@\n ENTRYPOINT_ENV_VAR = \"PANTS_ENTRYPOINT\"\n DEFAULT_ENTRYPOINT = \"pants.bin.pants_exe:main\"\n \n+ RECURSION_LIMIT_ENV_VAR = \"PANTS_RECURSION_LIMIT\"\n+\n ENCODING_IGNORE_ENV_VAR = \"PANTS_IGNORE_UNRECOGNIZED_ENCODING\"\n \n class InvalidLocaleError(Exception):\n@@ -67,6 +70,10 @@\n )\n )\n \n+ @classmethod\n+ def set_recursion_limit(cls):\n+ sys.setrecursionlimit(int(os.environ.get(cls.RECURSION_LIMIT_ENV_VAR, \"10000\")))\n+\n @staticmethod\n def determine_entrypoint(env_var, default):\n return os.environ.pop(env_var, default)\n@@ -86,6 +93,7 @@\n def run(cls):\n cls.setup_warnings()\n cls.ensure_locale()\n+ cls.set_recursion_limit()\n entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)\n cls.load_and_execute(entrypoint)\n", "issue": "Target Hitting Recursion Limit During Pants Setup (with workaround)\n# Description of Problem\r\nWe\u2019re in the process of migrating from 1.25 to 2.1.0., and hit an issue trying to run a test on specific target. The target is large and results in a max recursion limit exceeded.\r\n\r\nI tried hacking on `sys.setrecursionlimit` and found for our use case 1021 was the min that would allow the test to succeed.\r\n\r\nWe can try breaking that target up, but the app it is testing is kind of a monolith so i don\u2019t know how successful that would be.\r\n\r\nCan you make a runtime limit in pants to handle?\r\n\r\nThis error happens in the pants setup before our pytest is run.\r\n\r\n# Workaround\r\nIn one of our plugin's `register.py` we added `sys.setrecursionlimit(1021)` and this resolved our problem.\n", "before_files": [{"content": "# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport importlib\nimport locale\nimport os\nimport warnings\nfrom textwrap import dedent\n\n\nclass PantsLoader:\n \"\"\"Loads and executes entrypoints.\"\"\"\n\n ENTRYPOINT_ENV_VAR = \"PANTS_ENTRYPOINT\"\n DEFAULT_ENTRYPOINT = \"pants.bin.pants_exe:main\"\n\n ENCODING_IGNORE_ENV_VAR = \"PANTS_IGNORE_UNRECOGNIZED_ENCODING\"\n\n class InvalidLocaleError(Exception):\n \"\"\"Raised when a valid locale can't be found.\"\"\"\n\n @staticmethod\n def setup_warnings():\n # We want to present warnings to the user, set this up before importing any of our own code,\n # to ensure all deprecation warnings are seen, including module deprecations.\n # The \"default\" action displays a warning for a particular file and line number exactly once.\n # See https://docs.python.org/3/library/warnings.html#the-warnings-filter for the complete list.\n #\n # However, we do turn off deprecation warnings for libraries that Pants uses for which we do\n # not have a fixed upstream version, typically because the library is no longer maintained.\n warnings.simplefilter(\"default\", category=DeprecationWarning)\n # TODO: Eric-Arellano has emailed the author to see if he is willing to accept a PR fixing the\n # deprecation warnings and to release the fix. If he says yes, remove this once fixed.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"ansicolors\")\n # Silence this ubiquitous warning. 
Several of our 3rd party deps incur this.\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated\",\n )\n\n @classmethod\n def ensure_locale(cls):\n # Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.\n # This check is done early to give good feedback to user on how to fix the problem. Other\n # libraries called by Pants may fail with more obscure errors.\n encoding = locale.getpreferredencoding()\n if (\n encoding.lower() != \"utf-8\"\n and os.environ.get(cls.ENCODING_IGNORE_ENV_VAR, None) is None\n ):\n raise cls.InvalidLocaleError(\n dedent(\n \"\"\"\n Your system's preferred encoding is `{}`, but Pants requires `UTF-8`.\n Specifically, Python's `locale.getpreferredencoding()` must resolve to `UTF-8`.\n\n Fix it by setting the LC_* and LANG environment settings. Example:\n LC_ALL=en_US.UTF-8\n LANG=en_US.UTF-8\n Or, bypass it by setting the below environment variable.\n {}=1\n Note: we cannot guarantee consistent behavior with this bypass enabled.\n \"\"\".format(\n encoding, cls.ENCODING_IGNORE_ENV_VAR\n )\n )\n )\n\n @staticmethod\n def determine_entrypoint(env_var, default):\n return os.environ.pop(env_var, default)\n\n @staticmethod\n def load_and_execute(entrypoint):\n assert \":\" in entrypoint, \"ERROR: entrypoint must be of the form `module.path:callable`\"\n module_path, func_name = entrypoint.split(\":\", 1)\n module = importlib.import_module(module_path)\n entrypoint_main = getattr(module, func_name)\n assert callable(entrypoint_main), \"ERROR: entrypoint `{}` is not callable\".format(\n entrypoint\n )\n entrypoint_main()\n\n @classmethod\n def run(cls):\n cls.setup_warnings()\n cls.ensure_locale()\n entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)\n cls.load_and_execute(entrypoint)\n\n\ndef main():\n PantsLoader.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/python/pants/bin/pants_loader.py"}]} | 1,776 | 314 |
gh_patches_debug_29782 | rasdani/github-patches | git_diff | cupy__cupy-7693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix `cupyx.scipy.sparse.csgraph` module to work with the latest RAPIDS cuGraph
Follow the API change introduced in RAPIDS 22.12.
https://github.com/cupy/cupy/pull/7647#discussion_r1244820097
cc/ @pentschev
</issue>
<code>
[start of cupyx/scipy/sparse/csgraph/_traversal.py]
1 import cupy
2 import cupyx.scipy.sparse
3 try:
4 import pylibcugraph
5 pylibcugraph_available = True
6 except ModuleNotFoundError:
7 pylibcugraph_available = False
8
9
10 def connected_components(csgraph, directed=True, connection='weak',
11 return_labels=True):
12 """Analyzes the connected components of a sparse graph
13
14 Args:
15 csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency
16 matrix representing connectivity among nodes.
17 directed (bool): If ``True``, it operates on a directed graph. If
18 ``False``, it operates on an undirected graph.
19 connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the
20 type of connection to use. Nodes i and j are "strongly" connected
21 only when a path exists both from i to j and from j to i.
22 If ``directed`` is ``False``, this argument is ignored.
23 return_labels (bool): If ``True``, it returns the labels for each of
24 the connected components.
25
26 Returns:
27 tuple of int and cupy.ndarray, or int:
28 If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,
29 where ``n`` is the number of connected components and ``labels`` is
30 labels of each connected components. Otherwise, returns ``n``.
31
32 .. seealso:: :func:`scipy.sparse.csgraph.connected_components`
33 """
34 if not pylibcugraph_available:
35 raise RuntimeError('pylibcugraph is not available')
36
37 connection = connection.lower()
38 if connection not in ('weak', 'strong'):
39 raise ValueError("connection must be 'weak' or 'strong'")
40
41 if not directed:
42 connection = 'weak'
43
44 if csgraph.ndim != 2:
45 raise ValueError('graph should have two dimensions')
46
47 if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
48 csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
49 m, m1 = csgraph.shape
50 if m != m1:
51 raise ValueError('graph should be a square array')
52 if csgraph.nnz == 0:
53 return m, cupy.arange(m, dtype=csgraph.indices.dtype)
54 labels = cupy.empty(m, dtype=csgraph.indices.dtype)
55
56 if connection == 'strong':
57 pylibcugraph.strongly_connected_components(
58 offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
59 num_verts=m, num_edges=csgraph.nnz, labels=labels)
60 else:
61 csgraph += csgraph.T
62 if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
63 csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
64 pylibcugraph.weakly_connected_components(
65 offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
66 num_verts=m, num_edges=csgraph.nnz, labels=labels)
67 # Note: In the case of weak connection, cuGraph creates labels with a
68 # start number of 1, so decrement the label number.
69 labels -= 1
70
71 count = cupy.zeros((1,), dtype=csgraph.indices.dtype)
72 root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)
73 _cupy_count_components(labels, count, root_labels, size=m)
74 n = int(count[0])
75 if not return_labels:
76 return n
77 _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)
78 return n, labels
79
80
81 _cupy_count_components = cupy.ElementwiseKernel(
82 '',
83 'raw I labels, raw int32 count, raw int32 root_labels',
84 '''
85 int j = i;
86 while (j != labels[j]) { j = labels[j]; }
87 if (j != i) {
88 labels[i] = j;
89 } else {
90 int k = atomicAdd(&count[0], 1);
91 root_labels[k] = i;
92 }
93 ''',
94 '_cupy_count_components')
95
96
97 _cupy_adjust_labels = cupy.ElementwiseKernel(
98 'int32 n_root_labels, raw I root_labels',
99 'I labels',
100 '''
101 int cur_label = labels;
102 int j_min = 0;
103 int j_max = n_root_labels - 1;
104 int j = (j_min + j_max) / 2;
105 while (j_min < j_max) {
106 if (cur_label == root_labels[j]) break;
107 if (cur_label < root_labels[j]) {
108 j_max = j - 1;
109 } else {
110 j_min = j + 1;
111 }
112 j = (j_min + j_max) / 2;
113 }
114 labels = j;
115 ''',
116 '_cupy_adjust_labels')
117
[end of cupyx/scipy/sparse/csgraph/_traversal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupyx/scipy/sparse/csgraph/_traversal.py b/cupyx/scipy/sparse/csgraph/_traversal.py
--- a/cupyx/scipy/sparse/csgraph/_traversal.py
+++ b/cupyx/scipy/sparse/csgraph/_traversal.py
@@ -51,9 +51,9 @@
raise ValueError('graph should be a square array')
if csgraph.nnz == 0:
return m, cupy.arange(m, dtype=csgraph.indices.dtype)
- labels = cupy.empty(m, dtype=csgraph.indices.dtype)
if connection == 'strong':
+ labels = cupy.empty(m, dtype=csgraph.indices.dtype)
pylibcugraph.strongly_connected_components(
offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
num_verts=m, num_edges=csgraph.nnz, labels=labels)
@@ -61,12 +61,15 @@
csgraph += csgraph.T
if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
- pylibcugraph.weakly_connected_components(
- offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
- num_verts=m, num_edges=csgraph.nnz, labels=labels)
- # Note: In the case of weak connection, cuGraph creates labels with a
- # start number of 1, so decrement the label number.
- labels -= 1
+ _, labels = pylibcugraph.weakly_connected_components(
+ resource_handle=None,
+ graph=None,
+ indices=csgraph.indices,
+ offsets=csgraph.indptr,
+ weights=None,
+ labels=None,
+ do_expensive_check=False,
+ )
count = cupy.zeros((1,), dtype=csgraph.indices.dtype)
root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)
| {"golden_diff": "diff --git a/cupyx/scipy/sparse/csgraph/_traversal.py b/cupyx/scipy/sparse/csgraph/_traversal.py\n--- a/cupyx/scipy/sparse/csgraph/_traversal.py\n+++ b/cupyx/scipy/sparse/csgraph/_traversal.py\n@@ -51,9 +51,9 @@\n raise ValueError('graph should be a square array')\n if csgraph.nnz == 0:\n return m, cupy.arange(m, dtype=csgraph.indices.dtype)\n- labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n \n if connection == 'strong':\n+ labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n pylibcugraph.strongly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n@@ -61,12 +61,15 @@\n csgraph += csgraph.T\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n- pylibcugraph.weakly_connected_components(\n- offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n- num_verts=m, num_edges=csgraph.nnz, labels=labels)\n- # Note: In the case of weak connection, cuGraph creates labels with a\n- # start number of 1, so decrement the label number.\n- labels -= 1\n+ _, labels = pylibcugraph.weakly_connected_components(\n+ resource_handle=None,\n+ graph=None,\n+ indices=csgraph.indices,\n+ offsets=csgraph.indptr,\n+ weights=None,\n+ labels=None,\n+ do_expensive_check=False,\n+ )\n \n count = cupy.zeros((1,), dtype=csgraph.indices.dtype)\n root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)\n", "issue": "Fix `cupyx.scipy.sparse.csgraph` module to work with the latest RAPIDS cuGraph\nFollow the API change introduced in RAPIDS 22.12.\r\nhttps://github.com/cupy/cupy/pull/7647#discussion_r1244820097\r\n\r\ncc/ @pentschev \n", "before_files": [{"content": "import cupy\nimport cupyx.scipy.sparse\ntry:\n import pylibcugraph\n pylibcugraph_available = True\nexcept ModuleNotFoundError:\n pylibcugraph_available = False\n\n\ndef connected_components(csgraph, directed=True, connection='weak',\n return_labels=True):\n \"\"\"Analyzes the connected components of a sparse graph\n\n Args:\n csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency\n matrix representing connectivity among nodes.\n directed (bool): If ``True``, it operates on a directed graph. If\n ``False``, it operates on an undirected graph.\n connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the\n type of connection to use. Nodes i and j are \"strongly\" connected\n only when a path exists both from i to j and from j to i.\n If ``directed`` is ``False``, this argument is ignored.\n return_labels (bool): If ``True``, it returns the labels for each of\n the connected components.\n\n Returns:\n tuple of int and cupy.ndarray, or int:\n If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,\n where ``n`` is the number of connected components and ``labels`` is\n labels of each connected components. Otherwise, returns ``n``.\n\n .. 
seealso:: :func:`scipy.sparse.csgraph.connected_components`\n \"\"\"\n if not pylibcugraph_available:\n raise RuntimeError('pylibcugraph is not available')\n\n connection = connection.lower()\n if connection not in ('weak', 'strong'):\n raise ValueError(\"connection must be 'weak' or 'strong'\")\n\n if not directed:\n connection = 'weak'\n\n if csgraph.ndim != 2:\n raise ValueError('graph should have two dimensions')\n\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n m, m1 = csgraph.shape\n if m != m1:\n raise ValueError('graph should be a square array')\n if csgraph.nnz == 0:\n return m, cupy.arange(m, dtype=csgraph.indices.dtype)\n labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n\n if connection == 'strong':\n pylibcugraph.strongly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n else:\n csgraph += csgraph.T\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n pylibcugraph.weakly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n # Note: In the case of weak connection, cuGraph creates labels with a\n # start number of 1, so decrement the label number.\n labels -= 1\n\n count = cupy.zeros((1,), dtype=csgraph.indices.dtype)\n root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)\n _cupy_count_components(labels, count, root_labels, size=m)\n n = int(count[0])\n if not return_labels:\n return n\n _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)\n return n, labels\n\n\n_cupy_count_components = cupy.ElementwiseKernel(\n '',\n 'raw I labels, raw int32 count, raw int32 root_labels',\n '''\n int j = i;\n while (j != labels[j]) { j = labels[j]; }\n if (j != i) {\n labels[i] = j;\n } else {\n int k = atomicAdd(&count[0], 1);\n root_labels[k] = i;\n }\n ''',\n '_cupy_count_components')\n\n\n_cupy_adjust_labels = cupy.ElementwiseKernel(\n 'int32 n_root_labels, raw I root_labels',\n 'I labels',\n '''\n int cur_label = labels;\n int j_min = 0;\n int j_max = n_root_labels - 1;\n int j = (j_min + j_max) / 2;\n while (j_min < j_max) {\n if (cur_label == root_labels[j]) break;\n if (cur_label < root_labels[j]) {\n j_max = j - 1;\n } else {\n j_min = j + 1;\n }\n j = (j_min + j_max) / 2;\n }\n labels = j;\n ''',\n '_cupy_adjust_labels')\n", "path": "cupyx/scipy/sparse/csgraph/_traversal.py"}]} | 1,918 | 440 |
gh_patches_debug_2892 | rasdani/github-patches | git_diff | joke2k__faker-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Published packages include docs/ as a module
The published wheel and sdist on PyPI for at least version 0.7.5 include `docs/__init__.py` as a top-level module in addition to `faker`. This conflicts with some other packages we use (PyICU) and seems like bad package hygiene, especially since the `docs` dir in this repository is definitely not a module. My guess is that a `__init__.py` made it in there on the maintainer's machine before running `setup.py` and it was erroneously discovered as a module.
We're going to republish the package to our own internal repository, but I think it would help the community to `git clean` as necessary and re-publish a new version, and consider adding necessary exclusions to the `setup.py` or `MANIFEST.in`.
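For illustration, one possible exclusion in `setup.py` (a sketch only — whether `MANIFEST.in` also needs pruning is a separate decision):

```python
# setup.py (sketch)
from setuptools import find_packages, setup

setup(
    # ...existing arguments...
    packages=find_packages(exclude=("docs", "docs.*")),
)
```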
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import os
5 import io
6
7 from setuptools import setup, find_packages
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read()
11
12
13 version = '0.7.5'
14
15 # this module can be zip-safe if the zipimporter implements iter_modules or if
16 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
17 try:
18 import pkgutil
19 import zipimport
20 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
21 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
22 except (ImportError, AttributeError):
23 zip_safe = False
24
25 setup(
26 name='Faker',
27 version=version,
28 description="Faker is a Python package that generates fake data for you.",
29 long_description=README,
30 entry_points={
31 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
32 },
33 classifiers=[
34 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
35 'Development Status :: 3 - Alpha',
36 'Environment :: Console',
37 'Intended Audience :: Developers',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 2',
40 'Programming Language :: Python :: 2.7',
41 'Programming Language :: Python :: 3',
42 'Programming Language :: Python :: 3.4',
43 'Programming Language :: Python :: 3.5',
44 'Topic :: Software Development :: Libraries :: Python Modules',
45 'Topic :: Software Development :: Testing',
46 'Topic :: Utilities',
47 'License :: OSI Approved :: MIT License'
48 ],
49 keywords='faker fixtures data test mock generator',
50 author='joke2k',
51 author_email='[email protected]',
52 url='https://github.com/joke2k/faker',
53 license='MIT License',
54 packages=find_packages(),
55 platforms=["any"],
56 test_suite='faker.tests',
57 zip_safe=zip_safe,
58 install_requires=[
59 "python-dateutil>=2.4",
60 "six",
61 ],
62 extras_require={
63 ':python_version=="2.7"': [
64 'ipaddress',
65 ],
66 ':python_version=="3.0"': [
67 'importlib',
68 ],
69 ':python_version=="3.2"': [
70 'ipaddress',
71 ],
72 }
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
author_email='[email protected]',
url='https://github.com/joke2k/faker',
license='MIT License',
- packages=find_packages(),
+ packages=find_packages(exclude=("docs",)),
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n- packages=find_packages(),\n+ packages=find_packages(exclude=(\"docs\",)),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n", "issue": "Published packages include docs/ as a module\nThe published wheel and sdist on PyPI for at least version 0.7.5 include `docs/__init__.py` as a top-level module in addition to `faker`. This conflicts with some other packages we use (PyICU) and seems like bad package hygiene, especially since the `docs` dir in this repository is definitely not a module. My guess is that a `__init__.py` made it in there on the maintainer's machine before running `setup.py` and it was erroneously discovered as a module.\r\n\r\nWe're going to republish the package to our own internal repository, but I think it would help the community to `git clean` as necessary and re-publish a new version, and consider adding necessary exclusions to the `setup.py` or `MANIFEST.in`.\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.7.5'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six\",\n ],\n extras_require={\n ':python_version==\"2.7\"': [\n 'ipaddress',\n ],\n ':python_version==\"3.0\"': [\n 'importlib',\n ],\n ':python_version==\"3.2\"': [\n 'ipaddress',\n ],\n }\n)\n", "path": "setup.py"}]} | 1,372 | 100 |
gh_patches_debug_13673 | rasdani/github-patches | git_diff | meltano__meltano-6779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: exit code null on snowplow telemetry
In investigating https://github.com/meltano/internal-data/issues/26 I saw a large increase in bad events. Looks like all of the ones from 2.6.0 and 2.5.0 are from:
`$.exit_code: null found, integer expected`
I dug into each one, and they all come from `add` and `discover` events.
queried using:
```sql
select *
from "RAW"."SNOWPLOW"."EVENTS_BAD"
where date_trunc('week', uploaded_at) > '2022-08-22'
and jsontext ilike '%2.5.0%';
```
</issue>
<code>
[start of src/meltano/core/tracking/schemas.py]
1 """Meltano Iglu schemas metadata & utilities."""
2
3 from __future__ import annotations
4
5 from dataclasses import dataclass
6
7 DEFAULT_VENDOR = "com.meltano"
8
9
10 @dataclass
11 class IgluSchema:
12 """Dataclass to store the name, version, vendor, and URL for an Iglu schema."""
13
14 name: str
15 version: str
16 vendor: str = DEFAULT_VENDOR
17
18 @property
19 def url(self) -> str:
20 """Construct an iglu schema URL.
21
22 Returns:
23 The URL to the schema.
24 """
25 return f"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}"
26
27
28 CliContextSchema = IgluSchema("cli_context", "1-1-0")
29 CliEventSchema = IgluSchema("cli_event", "1-0-1")
30 BlockEventSchema = IgluSchema("block_event", "1-0-0")
31 EnvironmentContextSchema = IgluSchema("environment_context", "1-0-0")
32 ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
33 ExitEventSchema = IgluSchema("exit_event", "1-0-0")
34 PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
35 ProjectContextSchema = IgluSchema("project_context", "1-1-0")
36 TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
37
[end of src/meltano/core/tracking/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py
--- a/src/meltano/core/tracking/schemas.py
+++ b/src/meltano/core/tracking/schemas.py
@@ -30,7 +30,7 @@
BlockEventSchema = IgluSchema("block_event", "1-0-0")
EnvironmentContextSchema = IgluSchema("environment_context", "1-0-0")
ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
-ExitEventSchema = IgluSchema("exit_event", "1-0-0")
+ExitEventSchema = IgluSchema("exit_event", "1-0-1")
PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
ProjectContextSchema = IgluSchema("project_context", "1-1-0")
TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
| {"golden_diff": "diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py\n--- a/src/meltano/core/tracking/schemas.py\n+++ b/src/meltano/core/tracking/schemas.py\n@@ -30,7 +30,7 @@\n BlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\n EnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\n ExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\n-ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\n+ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-1\")\n PluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\n ProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\n TelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "issue": "bug: exit code null on snowplow telemetry\nIn investigating https://github.com/meltano/internal-data/issues/26 I saw a large increase in bad events. Looks like all of the ones from 2.6.0 and 2.5.0 are from:\r\n\r\n`$.exit_code: null found, integer expected`\r\n\r\nAnd I dove in on each one and it's from `add` and `discover` events.\r\n\r\nqueried using:\r\n\r\n```sql\r\nselect *\r\nfrom \"RAW\".\"SNOWPLOW\".\"EVENTS_BAD\"\r\nwhere date_trunc('week', uploaded_at) > '2022-08-22'\r\nand jsontext ilike '%2.5.0%';\r\n```\n", "before_files": [{"content": "\"\"\"Meltano Iglu schemas metadata & utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nDEFAULT_VENDOR = \"com.meltano\"\n\n\n@dataclass\nclass IgluSchema:\n \"\"\"Dataclass to store the name, version, vendor, and URL for an Iglu schema.\"\"\"\n\n name: str\n version: str\n vendor: str = DEFAULT_VENDOR\n\n @property\n def url(self) -> str:\n \"\"\"Construct an iglu schema URL.\n\n Returns:\n The URL to the schema.\n \"\"\"\n return f\"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}\"\n\n\nCliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\nCliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\nBlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\nEnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\nExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\nExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\nPluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\nProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\nTelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "path": "src/meltano/core/tracking/schemas.py"}]} | 1,085 | 224 |
gh_patches_debug_5224 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyYAML security alert
Our application is getting a GitHub security alert from PyYAML, and this is the only package that has it as a dependency in our graph. It looks like this package no longer uses that package, but it is still declared as a dependency.
If this assessment is correct, the dependency should be removed from the `setup.py` and a new release upload to PyPI.
https://nvd.nist.gov/vuln/detail/CVE-2017-18342
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3 from glob import glob
4
5 from setuptools import Extension, find_packages, setup
6
7 long_description = (
8 "Scout Application Performance Monitoring Agent - https://scoutapp.com"
9 )
10 if os.path.exists("README.md"):
11 long_description = open("README.md").read()
12
13 # Try to compile the extensions, except for platforms or versions
14 # where our extensions are not supported
15 compile_extensions = True
16
17 setup_args = {
18 "name": "scout_apm",
19 "version": "2.0.0",
20 "description": "Scout Application Performance Monitoring Agent",
21 "long_description": long_description,
22 "long_description_content_type": "text/markdown",
23 "url": "https://github.com/scoutapp/scout_apm_python",
24 "author": "Scout",
25 "author_email": "[email protected]",
26 "license": "MIT",
27 "zip_safe": False,
28 "python_requires": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
29 "packages": find_packages("src"),
30 "package_dir": {"": "src"},
31 "py_modules": [os.splitext(os.basename(path))[0] for path in glob("src/*.py")],
32 "ext_modules": [
33 Extension("scout_apm.core.objtrace", ["src/scout_apm/core/ext/objtrace.c"])
34 ],
35 "entry_points": {
36 "console_scripts": [
37 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
38 ]
39 },
40 "install_requires": ["psutil", "PyYAML", "requests"],
41 "keywords": "apm performance monitoring development",
42 "classifiers": [
43 "Development Status :: 5 - Production/Stable",
44 "Intended Audience :: Developers",
45 "Topic :: System :: Monitoring",
46 "License :: OSI Approved :: MIT License",
47 "Operating System :: MacOS",
48 "Operating System :: POSIX",
49 "Operating System :: POSIX :: Linux",
50 "Programming Language :: Python :: 2",
51 "Programming Language :: Python :: 2.7",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.4",
54 "Programming Language :: Python :: 3.5",
55 "Programming Language :: Python :: 3.6",
56 "Programming Language :: Python :: 3.7",
57 ],
58 }
59
60 if sys.version_info <= (3, 0):
61 compile_extensions = False
62
63 if sys.platform.startswith("java"):
64 compile_extensions = False
65
66 if "__pypy__" in sys.builtin_module_names:
67 compile_extensions = False
68
69 if not compile_extensions:
70 del setup_args["ext_modules"]
71
72 setup(**setup_args)
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
"core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
]
},
- "install_requires": ["psutil", "PyYAML", "requests"],
+ "install_requires": ["psutil", "requests"],
"keywords": "apm performance monitoring development",
"classifiers": [
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n- \"install_requires\": [\"psutil\", \"PyYAML\", \"requests\"],\n+ \"install_requires\": [\"psutil\", \"requests\"],\n \"keywords\": \"apm performance monitoring development\",\n \"classifiers\": [\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "PyYAML security alert\nOur application is getting a GitHub security alert from PyYAML, and this is the only package that has it as a dependency in our graph. It looks like this package no longer uses that package, but it is still declared as a dependency.\r\n\r\nIf this assessment is correct, the dependency should be removed from the `setup.py` and a new release upload to PyPI.\r\n\r\nhttps://nvd.nist.gov/vuln/detail/CVE-2017-18342\n", "before_files": [{"content": "import os\nimport sys\nfrom glob import glob\n\nfrom setuptools import Extension, find_packages, setup\n\nlong_description = (\n \"Scout Application Performance Monitoring Agent - https://scoutapp.com\"\n)\nif os.path.exists(\"README.md\"):\n long_description = open(\"README.md\").read()\n\n# Try to compile the extensions, except for platforms or versions\n# where our extensions are not supported\ncompile_extensions = True\n\nsetup_args = {\n \"name\": \"scout_apm\",\n \"version\": \"2.0.0\",\n \"description\": \"Scout Application Performance Monitoring Agent\",\n \"long_description\": long_description,\n \"long_description_content_type\": \"text/markdown\",\n \"url\": \"https://github.com/scoutapp/scout_apm_python\",\n \"author\": \"Scout\",\n \"author_email\": \"[email protected]\",\n \"license\": \"MIT\",\n \"zip_safe\": False,\n \"python_requires\": \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n \"packages\": find_packages(\"src\"),\n \"package_dir\": {\"\": \"src\"},\n \"py_modules\": [os.splitext(os.basename(path))[0] for path in glob(\"src/*.py\")],\n \"ext_modules\": [\n Extension(\"scout_apm.core.objtrace\", [\"src/scout_apm/core/ext/objtrace.c\"])\n ],\n \"entry_points\": {\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n \"install_requires\": [\"psutil\", \"PyYAML\", \"requests\"],\n \"keywords\": \"apm performance monitoring development\",\n \"classifiers\": [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n}\n\nif sys.version_info <= (3, 0):\n compile_extensions = False\n\nif sys.platform.startswith(\"java\"):\n compile_extensions = False\n\nif \"__pypy__\" in sys.builtin_module_names:\n compile_extensions = False\n\nif not compile_extensions:\n del setup_args[\"ext_modules\"]\n\nsetup(**setup_args)\n", "path": "setup.py"}]} | 1,366 | 114 |
gh_patches_debug_4897 | rasdani/github-patches | git_diff | bridgecrewio__checkov-592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_GCP_29 - Checks failed after GCP resource renamed
**Describe the bug**
Google has renamed the value
`bucket_policy_only ` to `uniform_bucket_level_access`.
When adding the new value in the configuration the check CKV_GCP_29 ( Ensure that Cloud Storage buckets have uniform bucket-level access enabled ) still fails as it is still looking for the old value
**To Reproduce**
Steps to reproduce the behavior:
1. On tearragoat, add the value `uniform_bucket_level_access = true` and the checks will still fail
**Expected behavior**
The check should pass.
</issue>
<code>
[start of checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py]
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class GoogleStorageBucketUniformAccess(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that Cloud Storage buckets have uniform bucket-level access enabled"
8 id = "CKV_GCP_29"
9 supported_resources = ['google_storage_bucket']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'bucket_policy_only/[0]'
15
16
17 check = GoogleStorageBucketUniformAccess()
18
[end of checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
--- a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
+++ b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
@@ -11,7 +11,7 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return 'bucket_policy_only/[0]'
+ return 'uniform_bucket_level_access/[0]/bucket_policy_only/[0]'
check = GoogleStorageBucketUniformAccess()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n--- a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n+++ b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n@@ -11,7 +11,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n- return 'bucket_policy_only/[0]'\n+ return 'uniform_bucket_level_access/[0]/bucket_policy_only/[0]'\n \n \n check = GoogleStorageBucketUniformAccess()\n", "issue": "CKV_GCP_29 - Checks failed after GCP resource renamed\n**Describe the bug**\r\nGoogle has renamed the value\r\n`bucket_policy_only ` to `uniform_bucket_level_access`.\r\n\r\nWhen adding the new value in the configuration the check CKV_GCP_29 ( Ensure that Cloud Storage buckets have uniform bucket-level access enabled ) still fails as it is still looking for the old value\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. On tearragoat, add the value `uniform_bucket_level_access = true` and the checks will still fail\r\n\r\n\r\n**Expected behavior**\r\nThe check should pass.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass GoogleStorageBucketUniformAccess(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Cloud Storage buckets have uniform bucket-level access enabled\"\n id = \"CKV_GCP_29\"\n supported_resources = ['google_storage_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'bucket_policy_only/[0]'\n\n\ncheck = GoogleStorageBucketUniformAccess()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py"}]} | 860 | 155 |
gh_patches_debug_20051 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1111 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Harris Teeter
Mostly southeastern https://www.harristeeter.com/store/#/app/store-locator
</issue>
<code>
[start of locations/spiders/harristeeter.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8 DAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
9
10
11 class HarristeeterSpider(scrapy.Spider):
12 name = "harristeeter"
13 allowed_domains = ["harristeeter.com"]
14 start_urls = (
15 'https://www.harristeeter.com/store/#/app/store-locator',
16 )
17
18 handle_httpstatus_list = [401]
19 custom_settings = {
20 'DEFAULT_REQUEST_HEADERS' : {
21 'Accept': 'application/json, text/plain, */*',
22 'Accept-Encoding': 'gzip, deflate, br',
23 'Connection': 'keep-alive',
24 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
25 }
26 }
27
28
29 def store_hours(self, store_hours):
30 res=''
31 for day in store_hours:
32 match = re.search(r'(\w*)(\s*-\s*(\w*))?\s*(\d{1,2})(:(\d{1,2}))?\s*(am|pm|mp)?\s*-\s*(\d{1,2})(:(\d{1,2}))?\s*(am|pm|mp)',day.replace('Midnight','12:00pm'))
33
34 if not match:
35 continue
36 res += match[1][:2]
37
38 try:
39 res += match[2].replace(' ','')[:3]+' '
40 except Exception:
41 res += ' '
42
43 if match[5]:
44 first_minutes = match[5]
45 else:
46 first_minutes = ':00'
47
48 if match[9]:
49 second_minutes = match[9]
50 else:
51 second_minutes = ':00'
52
53 res += str(int(match[4])+(12 if match[7] in ['pm','mp'] else 0)) +first_minutes+'-'
54 res += str(int(match[8])+(12 if match[10] in ['pm','mp'] else 0)) +second_minutes+';'
55
56 return res.rstrip(';').strip()
57
58 def parse(self, response):
59 yield scrapy.Request('https://www.harristeeter.com/api/checkLogin',
60 method='POST',
61 callback=self.check_login)
62
63
64 def check_login(self, response):
65
66 yield scrapy.Request(
67 'https://www.harristeeter.com/store/#/app/store-locator',
68 callback=self.get_store_locator)
69
70 def get_store_locator(self, response):
71
72 yield scrapy.Request(
73 'https://www.harristeeter.com/api/v1/stores/search?Address=98011&Radius=20000&AllStores=true',
74 callback=self.parse_shop
75 )
76
77 def parse_shop(self, response):
78 shops = json.loads(response.text)['Data']
79
80 for shop in shops:
81 props = {
82 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),
83 'phone': shop['Telephone'],
84 'country': shop['Country'],
85 'ref': shop['Title'],
86 'addr_full': shop['Street'],
87 'postcode': shop.get('ZipCode'),
88 'city': shop.get('City'),
89 'state': shop.get('State'),
90 'lat': float(shop['Latitude']),
91 'lon': float(shop['Longitude']),
92 }
93
94 yield GeojsonPointItem(**props)
95
[end of locations/spiders/harristeeter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/harristeeter.py b/locations/spiders/harristeeter.py
--- a/locations/spiders/harristeeter.py
+++ b/locations/spiders/harristeeter.py
@@ -79,16 +79,17 @@
for shop in shops:
props = {
- 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),
- 'phone': shop['Telephone'],
- 'country': shop['Country'],
- 'ref': shop['Title'],
+ 'ref': shop['StoreNumber'],
'addr_full': shop['Street'],
- 'postcode': shop.get('ZipCode'),
'city': shop.get('City'),
'state': shop.get('State'),
+ 'postcode': shop.get('PostalCode'),
+ 'country': shop['Country'],
+ 'name': shop['StoreName'],
+ 'phone': shop['Telephone'],
'lat': float(shop['Latitude']),
'lon': float(shop['Longitude']),
+ 'opening_hours': shop['StoreHours'].replace('Open 24 Hours', 'Mo-Su 0:00-24:00')
}
yield GeojsonPointItem(**props)
| {"golden_diff": "diff --git a/locations/spiders/harristeeter.py b/locations/spiders/harristeeter.py\n--- a/locations/spiders/harristeeter.py\n+++ b/locations/spiders/harristeeter.py\n@@ -79,16 +79,17 @@\n \n for shop in shops:\n props = {\n- 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),\n- 'phone': shop['Telephone'],\n- 'country': shop['Country'],\n- 'ref': shop['Title'],\n+ 'ref': shop['StoreNumber'],\n 'addr_full': shop['Street'],\n- 'postcode': shop.get('ZipCode'),\n 'city': shop.get('City'),\n 'state': shop.get('State'),\n+ 'postcode': shop.get('PostalCode'),\n+ 'country': shop['Country'],\n+ 'name': shop['StoreName'],\n+ 'phone': shop['Telephone'],\n 'lat': float(shop['Latitude']),\n 'lon': float(shop['Longitude']),\n+ 'opening_hours': shop['StoreHours'].replace('Open 24 Hours', 'Mo-Su 0:00-24:00')\n }\n \n yield GeojsonPointItem(**props)\n", "issue": "Harris Teeter\nMostly southeastern https://www.harristeeter.com/store/#/app/store-locator\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\nDAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']\n\n\nclass HarristeeterSpider(scrapy.Spider):\n name = \"harristeeter\"\n allowed_domains = [\"harristeeter.com\"]\n start_urls = (\n 'https://www.harristeeter.com/store/#/app/store-locator',\n )\n\n handle_httpstatus_list = [401]\n custom_settings = {\n 'DEFAULT_REQUEST_HEADERS' : {\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n }\n }\n\n\n def store_hours(self, store_hours):\n res=''\n for day in store_hours:\n match = re.search(r'(\\w*)(\\s*-\\s*(\\w*))?\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)?\\s*-\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)',day.replace('Midnight','12:00pm'))\n\n if not match:\n continue\n res += match[1][:2]\n\n try:\n res += match[2].replace(' ','')[:3]+' '\n except Exception:\n res += ' '\n\n if match[5]:\n first_minutes = match[5]\n else:\n first_minutes = ':00'\n\n if match[9]:\n second_minutes = match[9]\n else:\n second_minutes = ':00'\n\n res += str(int(match[4])+(12 if match[7] in ['pm','mp'] else 0)) +first_minutes+'-'\n res += str(int(match[8])+(12 if match[10] in ['pm','mp'] else 0)) +second_minutes+';'\n\n return res.rstrip(';').strip()\n\n def parse(self, response):\n yield scrapy.Request('https://www.harristeeter.com/api/checkLogin',\n method='POST',\n callback=self.check_login)\n\n\n def check_login(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/store/#/app/store-locator',\n callback=self.get_store_locator)\n\n def get_store_locator(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/api/v1/stores/search?Address=98011&Radius=20000&AllStores=true',\n callback=self.parse_shop\n )\n\n def parse_shop(self, response):\n shops = json.loads(response.text)['Data']\n\n for shop in shops:\n props = {\n 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),\n 'phone': shop['Telephone'],\n 'country': shop['Country'],\n 'ref': shop['Title'],\n 'addr_full': shop['Street'],\n 'postcode': shop.get('ZipCode'),\n 'city': shop.get('City'),\n 'state': shop.get('State'),\n 'lat': float(shop['Latitude']),\n 'lon': float(shop['Longitude']),\n }\n\n yield GeojsonPointItem(**props)\n", "path": 
"locations/spiders/harristeeter.py"}]} | 1,563 | 290 |
gh_patches_debug_17150 | rasdani/github-patches | git_diff | Kinto__kinto-492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kinto start takes too much time.
It can sometimes take more than 2 seconds, as reported by @n1k0 on JS clients integration tests.
I investigated a bit and found out, that on my machine, loading the entrypoint takes more than 1 second already:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')"
python -c 0,96s user 0,16s system 99% cpu 1,132 total
```
In comparison, `pserve` takes 200msec:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('pyramid', 'console_scripts', 'pcreate')"
python -c 0,18s user 0,09s system 98% cpu 0,272 total
```
I realized that moving `import requests` from `cliquet.initialization` imports [PR](https://github.com/mozilla-services/cliquet/pull/674), and remove `import pip` from `kinto.__main__` I could reduce by half:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')"
python -c 0,36s user 0,18s system 98% cpu 0,543 total
```
I knew this was not going to speed up the `kinto start` command too much. I tracked down and noticed the `__main__:main` was executed twice because of `--reload` argument.
</issue>
<code>
[start of kinto/__main__.py]
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5
6 from six.moves import input
7 from cliquet.scripts import cliquet
8 from pyramid.scripts import pserve
9 from pyramid.paster import bootstrap
10 from kinto import __version__
11 from kinto.config import init
12
13 CONFIG_FILE = 'config/kinto.ini'
14
15
16 def main(args=None):
17 """The main routine."""
18 if args is None:
19 args = sys.argv[1:]
20
21 parser = argparse.ArgumentParser(description="Kinto commands")
22 parser.add_argument('--ini',
23 help='Application configuration file',
24 dest='ini_file',
25 required=False,
26 default=CONFIG_FILE)
27 parser.add_argument('--backend',
28 help='Specify backend',
29 dest='backend',
30 required=False,
31 default=None)
32
33 parser.add_argument('-v', '--version',
34 action='version', version=__version__,
35 help='Print the Kinto version and exit.')
36
37 subparsers = parser.add_subparsers(title='subcommands',
38 description='valid subcommands',
39 help='init/start/migrate')
40
41 parser_init = subparsers.add_parser('init')
42 parser_init.set_defaults(which='init')
43
44 parser_migrate = subparsers.add_parser('migrate')
45 parser_migrate.set_defaults(which='migrate')
46
47 parser_start = subparsers.add_parser('start')
48 parser_start.set_defaults(which='start')
49
50 args = vars(parser.parse_args())
51 config_file = args['ini_file']
52
53 if args['which'] == 'init':
54 if os.path.exists(config_file):
55 print("%s already exist." % config_file, file=sys.stderr)
56 sys.exit(1)
57
58 backend = args['backend']
59 if not backend:
60 while True:
61 prompt = ("Select the backend you would like to use: "
62 "(1 - postgresql, 2 - redis, default - memory) ")
63 answer = input(prompt).strip()
64 try:
65 backends = {"1": "postgresql", "2": "redis", "": "memory"}
66 backend = backends[answer]
67 break
68 except KeyError:
69 pass
70
71 init(config_file, backend)
72
73 # Install postgresql libraries if necessary
74 if backend == "postgresql":
75 try:
76 import psycopg2 # NOQA
77 except ImportError:
78 import pip
79 pip.main(['install', "cliquet[postgresql]"])
80
81 elif args['which'] == 'migrate':
82 env = bootstrap(config_file)
83 cliquet.init_schema(env)
84
85 elif args['which'] == 'start':
86 pserve_argv = ['pserve', config_file, '--reload']
87 pserve.main(pserve_argv)
88
89
90 if __name__ == "__main__":
91 main()
92
[end of kinto/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -45,6 +45,11 @@
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
+ parser_start.add_argument('--reload',
+ action='store_true',
+ help='Restart when code or config changes',
+ required=False,
+ default=False)
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
@@ -83,7 +88,9 @@
cliquet.init_schema(env)
elif args['which'] == 'start':
- pserve_argv = ['pserve', config_file, '--reload']
+ pserve_argv = ['pserve', config_file]
+ if args['reload']:
+ pserve_argv.append('--reload')
pserve.main(pserve_argv)
| {"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -45,6 +45,11 @@\n parser_migrate.set_defaults(which='migrate')\n \n parser_start = subparsers.add_parser('start')\n+ parser_start.add_argument('--reload',\n+ action='store_true',\n+ help='Restart when code or config changes',\n+ required=False,\n+ default=False)\n parser_start.set_defaults(which='start')\n \n args = vars(parser.parse_args())\n@@ -83,7 +88,9 @@\n cliquet.init_schema(env)\n \n elif args['which'] == 'start':\n- pserve_argv = ['pserve', config_file, '--reload']\n+ pserve_argv = ['pserve', config_file]\n+ if args['reload']:\n+ pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n", "issue": "kinto start takes too much time.\nIt can sometimes take more than 2 seconds, as reported by @n1k0 on JS clients integration tests.\n\nI investigated a bit and found out, that on my machine, loading the entrypoint takes more than 1 second already:\n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')\"\npython -c 0,96s user 0,16s system 99% cpu 1,132 total\n```\n\nIn comparison, `pserve` takes 200msec: \n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('pyramid', 'console_scripts', 'pcreate')\"\npython -c 0,18s user 0,09s system 98% cpu 0,272 total\n```\n\nI realized that moving `import requests` from `cliquet.initialization` imports [PR](https://github.com/mozilla-services/cliquet/pull/674), and remove `import pip` from `kinto.__main__` I could reduce by half:\n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')\"\npython -c 0,36s user 0,18s system 98% cpu 0,543 total\n```\n\nI knew this was not going to speed up the `kinto start` command too much. 
I tracked down and noticed the `__main__:main` was executed twice because of `--reload` argument.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom cliquet.scripts import cliquet\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n help='init/start/migrate')\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.set_defaults(which='start')\n\n args = vars(parser.parse_args())\n config_file = args['ini_file']\n\n if args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exist.\" % config_file, file=sys.stderr)\n sys.exit(1)\n\n backend = args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"cliquet[postgresql]\"])\n\n elif args['which'] == 'migrate':\n env = bootstrap(config_file)\n cliquet.init_schema(env)\n\n elif args['which'] == 'start':\n pserve_argv = ['pserve', config_file, '--reload']\n pserve.main(pserve_argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "kinto/__main__.py"}]} | 1,659 | 211 |
gh_patches_debug_28526 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search in EUTF akvo site
Partner team had a training and workshop with EUTF last week and discovered that search terms in EUTF akvo site returned unrelated results.
Search for tombouctou shows up a project of SNV in EUTF akvo page, which is confusing for the partner as they expect to see their own projects only on their akvo site.
<img width="1070" alt="screen shot 2017-02-06 at 15 56 41" src="https://cloud.githubusercontent.com/assets/21127166/22652066/45bdf606-ec85-11e6-9c05-25d421b329c1.png">
What the partner expects is to see just projects where they are one of the participating partners.
If the search does not match any of their projects, it should then not return anything.
</issue>
<code>
[start of akvo/rest/views/typeahead.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from akvo.rest.serializers import (TypeaheadCountrySerializer,
10 TypeaheadOrganisationSerializer,
11 TypeaheadProjectSerializer,
12 TypeaheadProjectUpdateSerializer)
13
14 from akvo.codelists.models import Country, Version
15 from akvo.rsr.models import Organisation, Project, ProjectUpdate
16
17 from django.conf import settings
18
19 from rest_framework.decorators import api_view
20 from rest_framework.response import Response
21
22
23 def rejig(queryset, serializer):
24 """Rearrange & add queryset count to the response data."""
25 return {
26 'count': queryset.count(),
27 'results': serializer.data
28 }
29
30
31 @api_view(['GET'])
32 def typeahead_country(request):
33 iati_version = Version.objects.get(code=settings.IATI_VERSION)
34 countries = Country.objects.filter(version=iati_version)
35 return Response(
36 rejig(countries, TypeaheadCountrySerializer(countries, many=True))
37 )
38
39
40 @api_view(['GET'])
41 def typeahead_organisation(request):
42 organisations = Organisation.objects.all()
43 return Response(
44 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
45 many=True))
46 )
47
48
49 @api_view(['GET'])
50 def typeahead_user_organisations(request):
51 user = request.user
52 is_admin = user.is_active and (user.is_superuser or user.is_admin)
53 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()
54 return Response(
55 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
56 many=True))
57 )
58
59
60 @api_view(['GET'])
61 def typeahead_project(request):
62 projects = Project.objects.all().exclude(title='')
63 return Response(
64 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
65 )
66
67
68 @api_view(['GET'])
69 def typeahead_user_projects(request):
70 user = request.user
71 is_admin = user.is_active and (user.is_superuser or user.is_admin)
72 if is_admin:
73 projects = Project.objects.all()
74 else:
75 projects = user.approved_organisations().all_projects()
76 projects = projects.exclude(title='')
77 return Response(
78 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
79 )
80
81
82 @api_view(['GET'])
83 def typeahead_impact_projects(request):
84 user = request.user
85 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
86 projects = projects.published().filter(is_impact_project=True).order_by('title')
87
88 return Response(
89 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
90 )
91
92
93 @api_view(['GET'])
94 def typeahead_projectupdate(request):
95 updates = ProjectUpdate.objects.all()
96 return Response(
97 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
98 )
99
[end of akvo/rest/views/typeahead.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py
--- a/akvo/rest/views/typeahead.py
+++ b/akvo/rest/views/typeahead.py
@@ -13,6 +13,7 @@
from akvo.codelists.models import Country, Version
from akvo.rsr.models import Organisation, Project, ProjectUpdate
+from akvo.rsr.views.project import _project_directory_coll
from django.conf import settings
@@ -59,7 +60,39 @@
@api_view(['GET'])
def typeahead_project(request):
- projects = Project.objects.all().exclude(title='')
+ """Return the typeaheads for projects.
+
+ Without any query parameters, it returns the info for all the projects in
+ the current context -- changes depending on whether we are on a partner
+ site, or the RSR site.
+
+ If a project query parameter with a project id is passed, the info for all
+ projects associated with partners for the specified project is returned.
+
+ NOTE: The unauthenticated user gets information about all the projects when
+ using this API endpoint. More permission checking will need to be added,
+ if the amount of data being returned is changed.
+
+ """
+ project_id = request.GET.get('project', None)
+ if project_id is None:
+ project = None
+
+ else:
+ try:
+ project = Project.objects.get(id=project_id)
+ except Project.DoesNotExist:
+ project = None
+
+ if project is None:
+ # Search bar - organization projects, published
+ projects = _project_directory_coll(request)
+
+ else:
+ # Project editor - all projects of partners for this project
+ projects = Project.objects.of_partners(project.partners.distinct()).distinct()
+
+ projects = projects.exclude(title='')
return Response(
rejig(projects, TypeaheadProjectSerializer(projects, many=True))
)
| {"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -13,6 +13,7 @@\n \n from akvo.codelists.models import Country, Version\n from akvo.rsr.models import Organisation, Project, ProjectUpdate\n+from akvo.rsr.views.project import _project_directory_coll\n \n from django.conf import settings\n \n@@ -59,7 +60,39 @@\n \n @api_view(['GET'])\n def typeahead_project(request):\n- projects = Project.objects.all().exclude(title='')\n+ \"\"\"Return the typeaheads for projects.\n+\n+ Without any query parameters, it returns the info for all the projects in\n+ the current context -- changes depending on whether we are on a partner\n+ site, or the RSR site.\n+\n+ If a project query parameter with a project id is passed, the info for all\n+ projects associated with partners for the specified project is returned.\n+\n+ NOTE: The unauthenticated user gets information about all the projects when\n+ using this API endpoint. More permission checking will need to be added,\n+ if the amount of data being returned is changed.\n+\n+ \"\"\"\n+ project_id = request.GET.get('project', None)\n+ if project_id is None:\n+ project = None\n+\n+ else:\n+ try:\n+ project = Project.objects.get(id=project_id)\n+ except Project.DoesNotExist:\n+ project = None\n+\n+ if project is None:\n+ # Search bar - organization projects, published\n+ projects = _project_directory_coll(request)\n+\n+ else:\n+ # Project editor - all projects of partners for this project\n+ projects = Project.objects.of_partners(project.partners.distinct()).distinct()\n+\n+ projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n", "issue": "Search in EUTF akvo site\nPartner team had a training and workshop with EUTF last week and discovered that search terms in EUTF akvo site returned unrelated results.\r\n\r\nSearch for tombouctou shows up a project of SNV in EUTF akvo page, which is confusing for the partner as they expect to see their own projects only on their akvo site. \r\n\r\n<img width=\"1070\" alt=\"screen shot 2017-02-06 at 15 56 41\" src=\"https://cloud.githubusercontent.com/assets/21127166/22652066/45bdf606-ec85-11e6-9c05-25d421b329c1.png\">\r\n\r\nWhat the partner expects is to see just projects where they are one of the participating partners. \r\nIf the search does not match any of their projects, it should then not return anything. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. 
For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\n\nfrom akvo.codelists.models import Country, Version\nfrom akvo.rsr.models import Organisation, Project, ProjectUpdate\n\nfrom django.conf import settings\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n iati_version = Version.objects.get(code=settings.IATI_VERSION)\n countries = Country.objects.filter(version=iati_version)\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]} | 1,604 | 435 |
gh_patches_debug_33599 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Require pyspark minimal version is v3.2.0 to cut duplicates codes
Since [pyspark v3.2.0](https://github.com/apache/spark/blob/5d45a415f3a29898d92380380cfd82bfc7f579ea/python/pyspark/pandas/extensions.py#L28-L64), it has contained `CachedAccessor`, `_register_accessor`, `_register_accessor`
janitor requires pyspark minimal version is v3.1.2 at present.
Compared to v3.1.2, v3.2.0 is a minor version.
https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/janitor/spark/backend.py#L9-L37
Note: The pyspark in the [setup.py](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/.requirements/spark.txt#L9) file requires v3.2.1 but ci ([environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/environment-dev.yml#L41)) requires v3.1.2.
</issue>
<code>
[start of janitor/spark/backend.py]
1 """ Backend functions for pyspark."""
2
3 import warnings
4 from functools import wraps
5
6 from janitor.utils import import_message
7
8
9 class CachedAccessor:
10 """
11 Custom property-like object (descriptor) for caching accessors.
12
13 Parameters
14 ----------
15 name : str
16 The namespace this will be accessed under, e.g. `df.foo`
17 accessor : cls
18 The class with the extension methods.
19
20 NOTE
21 ----
22 Modified based on pandas.core.accessor.
23 """
24
25 def __init__(self, name, accessor):
26 self._name = name
27 self._accessor = accessor
28
29 def __get__(self, obj, cls):
30 if obj is None:
31 # we're accessing the attribute of the class, i.e., Dataset.geo
32 return self._accessor
33 accessor_obj = self._accessor(obj)
34 # Replace the property with the accessor object. Inspired by:
35 # http://www.pydanny.com/cached-property.html
36 setattr(obj, self._name, accessor_obj)
37 return accessor_obj
38
39
40 def _register_accessor(name, cls):
41 """
42 NOTE
43 ----
44 Modified based on pandas.core.accessor.
45 """
46
47 def decorator(accessor):
48 if hasattr(cls, name):
49 warnings.warn(
50 "registration of accessor {!r} under name {!r} for type "
51 "{!r} is overriding a preexisting attribute with the same "
52 "name.".format(accessor, name, cls),
53 UserWarning,
54 stacklevel=2,
55 )
56 setattr(cls, name, CachedAccessor(name, accessor))
57 return accessor
58
59 return decorator
60
61
62 def register_dataframe_accessor(name):
63 """
64 NOTE
65 ----
66 Modified based on pandas.core.accessor.
67
68 .. # noqa: DAR101 name
69 .. # noqa: DAR201
70 """
71 try:
72 from pyspark.sql import DataFrame
73 except ImportError:
74 import_message(
75 submodule="spark",
76 package="pyspark",
77 conda_channel="conda-forge",
78 pip_install=True,
79 )
80
81 return _register_accessor(name, DataFrame)
82
83
84 def register_dataframe_method(method):
85 """Register a function as a method attached to the Pyspark DataFrame.
86
87 NOTE
88 ----
89 Modified based on pandas_flavor.register.
90
91 .. # noqa: DAR101 method
92 .. # noqa: DAR201
93 """
94
95 def inner(*args, **kwargs):
96 class AccessorMethod:
97 def __init__(self, pyspark_obj):
98 self._obj = pyspark_obj
99
100 @wraps(method)
101 def __call__(self, *args, **kwargs):
102 return method(self._obj, *args, **kwargs)
103
104 register_dataframe_accessor(method.__name__)(AccessorMethod)
105
106 return method
107
108 return inner()
109
[end of janitor/spark/backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/spark/backend.py b/janitor/spark/backend.py
--- a/janitor/spark/backend.py
+++ b/janitor/spark/backend.py
@@ -1,84 +1,20 @@
""" Backend functions for pyspark."""
-import warnings
from functools import wraps
-from janitor.utils import import_message
+try:
+ from pyspark.pandas.extensions import register_dataframe_accessor
-class CachedAccessor:
- """
- Custom property-like object (descriptor) for caching accessors.
-
- Parameters
- ----------
- name : str
- The namespace this will be accessed under, e.g. `df.foo`
- accessor : cls
- The class with the extension methods.
-
- NOTE
- ----
- Modified based on pandas.core.accessor.
- """
-
- def __init__(self, name, accessor):
- self._name = name
- self._accessor = accessor
-
- def __get__(self, obj, cls):
- if obj is None:
- # we're accessing the attribute of the class, i.e., Dataset.geo
- return self._accessor
- accessor_obj = self._accessor(obj)
- # Replace the property with the accessor object. Inspired by:
- # http://www.pydanny.com/cached-property.html
- setattr(obj, self._name, accessor_obj)
- return accessor_obj
-
-
-def _register_accessor(name, cls):
- """
- NOTE
- ----
- Modified based on pandas.core.accessor.
- """
-
- def decorator(accessor):
- if hasattr(cls, name):
- warnings.warn(
- "registration of accessor {!r} under name {!r} for type "
- "{!r} is overriding a preexisting attribute with the same "
- "name.".format(accessor, name, cls),
- UserWarning,
- stacklevel=2,
- )
- setattr(cls, name, CachedAccessor(name, accessor))
- return accessor
-
- return decorator
-
-
-def register_dataframe_accessor(name):
- """
- NOTE
- ----
- Modified based on pandas.core.accessor.
-
- .. # noqa: DAR101 name
- .. # noqa: DAR201
- """
- try:
- from pyspark.sql import DataFrame
- except ImportError:
- import_message(
- submodule="spark",
- package="pyspark",
- conda_channel="conda-forge",
- pip_install=True,
- )
+except ImportError:
+ from janitor.utils import import_message
- return _register_accessor(name, DataFrame)
+ import_message(
+ submodule="spark",
+ package="pyspark",
+ conda_channel="conda-forge",
+ pip_install=True,
+ )
def register_dataframe_method(method):
| {"golden_diff": "diff --git a/janitor/spark/backend.py b/janitor/spark/backend.py\n--- a/janitor/spark/backend.py\n+++ b/janitor/spark/backend.py\n@@ -1,84 +1,20 @@\n \"\"\" Backend functions for pyspark.\"\"\"\n \n-import warnings\n from functools import wraps\n \n-from janitor.utils import import_message\n \n+try:\n+ from pyspark.pandas.extensions import register_dataframe_accessor\n \n-class CachedAccessor:\n- \"\"\"\n- Custom property-like object (descriptor) for caching accessors.\n-\n- Parameters\n- ----------\n- name : str\n- The namespace this will be accessed under, e.g. `df.foo`\n- accessor : cls\n- The class with the extension methods.\n-\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n- \"\"\"\n-\n- def __init__(self, name, accessor):\n- self._name = name\n- self._accessor = accessor\n-\n- def __get__(self, obj, cls):\n- if obj is None:\n- # we're accessing the attribute of the class, i.e., Dataset.geo\n- return self._accessor\n- accessor_obj = self._accessor(obj)\n- # Replace the property with the accessor object. Inspired by:\n- # http://www.pydanny.com/cached-property.html\n- setattr(obj, self._name, accessor_obj)\n- return accessor_obj\n-\n-\n-def _register_accessor(name, cls):\n- \"\"\"\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n- \"\"\"\n-\n- def decorator(accessor):\n- if hasattr(cls, name):\n- warnings.warn(\n- \"registration of accessor {!r} under name {!r} for type \"\n- \"{!r} is overriding a preexisting attribute with the same \"\n- \"name.\".format(accessor, name, cls),\n- UserWarning,\n- stacklevel=2,\n- )\n- setattr(cls, name, CachedAccessor(name, accessor))\n- return accessor\n-\n- return decorator\n-\n-\n-def register_dataframe_accessor(name):\n- \"\"\"\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n-\n- .. # noqa: DAR101 name\n- .. 
# noqa: DAR201\n- \"\"\"\n- try:\n- from pyspark.sql import DataFrame\n- except ImportError:\n- import_message(\n- submodule=\"spark\",\n- package=\"pyspark\",\n- conda_channel=\"conda-forge\",\n- pip_install=True,\n- )\n+except ImportError:\n+ from janitor.utils import import_message\n \n- return _register_accessor(name, DataFrame)\n+ import_message(\n+ submodule=\"spark\",\n+ package=\"pyspark\",\n+ conda_channel=\"conda-forge\",\n+ pip_install=True,\n+ )\n \n \n def register_dataframe_method(method):\n", "issue": "Require pyspark minimal version is v3.2.0 to cut duplicates codes\nSince [pyspark v3.2.0](https://github.com/apache/spark/blob/5d45a415f3a29898d92380380cfd82bfc7f579ea/python/pyspark/pandas/extensions.py#L28-L64), it has contained `CachedAccessor`, `_register_accessor`, `_register_accessor`\r\n\r\njanitor requires pyspark minimal version is v3.1.2 at present.\r\nCompared to v3.1.2, v3.2.0 is a minor version.\r\n\r\nhttps://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/janitor/spark/backend.py#L9-L37\r\n\r\nNote: The pyspark in the [setup.py](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/.requirements/spark.txt#L9) file requires v3.2.1 but ci ([environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/environment-dev.yml#L41)) requires v3.1.2.\n", "before_files": [{"content": "\"\"\" Backend functions for pyspark.\"\"\"\n\nimport warnings\nfrom functools import wraps\n\nfrom janitor.utils import import_message\n\n\nclass CachedAccessor:\n \"\"\"\n Custom property-like object (descriptor) for caching accessors.\n\n Parameters\n ----------\n name : str\n The namespace this will be accessed under, e.g. `df.foo`\n accessor : cls\n The class with the extension methods.\n\n NOTE\n ----\n Modified based on pandas.core.accessor.\n \"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n accessor_obj = self._accessor(obj)\n # Replace the property with the accessor object. Inspired by:\n # http://www.pydanny.com/cached-property.html\n setattr(obj, self._name, accessor_obj)\n return accessor_obj\n\n\ndef _register_accessor(name, cls):\n \"\"\"\n NOTE\n ----\n Modified based on pandas.core.accessor.\n \"\"\"\n\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor {!r} under name {!r} for type \"\n \"{!r} is overriding a preexisting attribute with the same \"\n \"name.\".format(accessor, name, cls),\n UserWarning,\n stacklevel=2,\n )\n setattr(cls, name, CachedAccessor(name, accessor))\n return accessor\n\n return decorator\n\n\ndef register_dataframe_accessor(name):\n \"\"\"\n NOTE\n ----\n Modified based on pandas.core.accessor.\n\n .. # noqa: DAR101 name\n .. # noqa: DAR201\n \"\"\"\n try:\n from pyspark.sql import DataFrame\n except ImportError:\n import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n pip_install=True,\n )\n\n return _register_accessor(name, DataFrame)\n\n\ndef register_dataframe_method(method):\n \"\"\"Register a function as a method attached to the Pyspark DataFrame.\n\n NOTE\n ----\n Modified based on pandas_flavor.register.\n\n .. # noqa: DAR101 method\n .. 
# noqa: DAR201\n \"\"\"\n\n def inner(*args, **kwargs):\n class AccessorMethod:\n def __init__(self, pyspark_obj):\n self._obj = pyspark_obj\n\n @wraps(method)\n def __call__(self, *args, **kwargs):\n return method(self._obj, *args, **kwargs)\n\n register_dataframe_accessor(method.__name__)(AccessorMethod)\n\n return method\n\n return inner()\n", "path": "janitor/spark/backend.py"}]} | 1,731 | 637 |
gh_patches_debug_21302 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove duplicate GCD implementation
### Feature description
[`greatest_common_divisor.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) and [`euclidean_gcd.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) both have basically the same two implementations of the Euclidean algorithm for calculating the GCD of 2 numbers. Thus, one of them should probably be removed as a duplicate.
</issue>
<code>
[start of maths/euclidean_gcd.py]
1 """ https://en.wikipedia.org/wiki/Euclidean_algorithm """
2
3
4 def euclidean_gcd(a: int, b: int) -> int:
5 """
6 Examples:
7 >>> euclidean_gcd(3, 5)
8 1
9
10 >>> euclidean_gcd(6, 3)
11 3
12 """
13 while b:
14 a, b = b, a % b
15 return a
16
17
18 def euclidean_gcd_recursive(a: int, b: int) -> int:
19 """
20 Recursive method for euclicedan gcd algorithm
21
22 Examples:
23 >>> euclidean_gcd_recursive(3, 5)
24 1
25
26 >>> euclidean_gcd_recursive(6, 3)
27 3
28 """
29 return a if b == 0 else euclidean_gcd_recursive(b, a % b)
30
31
32 def main():
33 print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
34 print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
35 print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
36 print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
37 print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
38
39 print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
40 print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
41 print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
42 print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
43 print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
44
45
46 if __name__ == "__main__":
47 main()
48
[end of maths/euclidean_gcd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/euclidean_gcd.py b/maths/euclidean_gcd.py
deleted file mode 100644
--- a/maths/euclidean_gcd.py
+++ /dev/null
@@ -1,47 +0,0 @@
-""" https://en.wikipedia.org/wiki/Euclidean_algorithm """
-
-
-def euclidean_gcd(a: int, b: int) -> int:
- """
- Examples:
- >>> euclidean_gcd(3, 5)
- 1
-
- >>> euclidean_gcd(6, 3)
- 3
- """
- while b:
- a, b = b, a % b
- return a
-
-
-def euclidean_gcd_recursive(a: int, b: int) -> int:
- """
- Recursive method for euclicedan gcd algorithm
-
- Examples:
- >>> euclidean_gcd_recursive(3, 5)
- 1
-
- >>> euclidean_gcd_recursive(6, 3)
- 3
- """
- return a if b == 0 else euclidean_gcd_recursive(b, a % b)
-
-
-def main():
- print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
- print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
- print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
- print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
- print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
-
- print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
- print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
- print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
- print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
- print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
-
-
-if __name__ == "__main__":
- main()
| {"golden_diff": "diff --git a/maths/euclidean_gcd.py b/maths/euclidean_gcd.py\ndeleted file mode 100644\n--- a/maths/euclidean_gcd.py\n+++ /dev/null\n@@ -1,47 +0,0 @@\n-\"\"\" https://en.wikipedia.org/wiki/Euclidean_algorithm \"\"\"\n-\n-\n-def euclidean_gcd(a: int, b: int) -> int:\n- \"\"\"\n- Examples:\n- >>> euclidean_gcd(3, 5)\n- 1\n-\n- >>> euclidean_gcd(6, 3)\n- 3\n- \"\"\"\n- while b:\n- a, b = b, a % b\n- return a\n-\n-\n-def euclidean_gcd_recursive(a: int, b: int) -> int:\n- \"\"\"\n- Recursive method for euclicedan gcd algorithm\n-\n- Examples:\n- >>> euclidean_gcd_recursive(3, 5)\n- 1\n-\n- >>> euclidean_gcd_recursive(6, 3)\n- 3\n- \"\"\"\n- return a if b == 0 else euclidean_gcd_recursive(b, a % b)\n-\n-\n-def main():\n- print(f\"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}\")\n- print(f\"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}\")\n- print(f\"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}\")\n- print(f\"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}\")\n- print(f\"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}\")\n-\n- print(f\"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}\")\n- print(f\"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}\")\n- print(f\"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}\")\n- print(f\"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}\")\n- print(f\"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}\")\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Remove duplicate GCD implementation\n### Feature description\n\n[`greatest_common_divisor.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) and [`euclidean_gcd.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) both have basically the same two implementations of the Euclidean algorithm for calculating the GCD of 2 numbers. Thus, one of them should probably be removed as a duplicate.\n", "before_files": [{"content": "\"\"\" https://en.wikipedia.org/wiki/Euclidean_algorithm \"\"\"\n\n\ndef euclidean_gcd(a: int, b: int) -> int:\n \"\"\"\n Examples:\n >>> euclidean_gcd(3, 5)\n 1\n\n >>> euclidean_gcd(6, 3)\n 3\n \"\"\"\n while b:\n a, b = b, a % b\n return a\n\n\ndef euclidean_gcd_recursive(a: int, b: int) -> int:\n \"\"\"\n Recursive method for euclicedan gcd algorithm\n\n Examples:\n >>> euclidean_gcd_recursive(3, 5)\n 1\n\n >>> euclidean_gcd_recursive(6, 3)\n 3\n \"\"\"\n return a if b == 0 else euclidean_gcd_recursive(b, a % b)\n\n\ndef main():\n print(f\"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}\")\n print(f\"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}\")\n print(f\"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}\")\n print(f\"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}\")\n print(f\"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}\")\n\n print(f\"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}\")\n print(f\"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}\")\n print(f\"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}\")\n print(f\"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}\")\n print(f\"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "maths/euclidean_gcd.py"}]} | 1,209 | 573 |
gh_patches_debug_1750 | rasdani/github-patches | git_diff | locustio__locust-1839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OOM error with master/slaves setup (zeromq, windows)
Hi !
### Describe the bug
An out-of-memory error occurs with ZeroMQ trying to allocate a crazy amount of memory in decoded_allocator, sometimes up to several petabytes. This might very well be a ZeroMQ bug:
` OUT OF MEMORY (bundled\zeromq\src\decoder_allocators.cpp:89)`
I added some logs and recompiled pyzmq to check what's going on. Upon further investigation, _max_counters seems to take a crazy value at some point. See [zmq_logs.txt](https://github.com/locustio/locust/files/4618065/zmq_logs.txt)
As you can see, allocator instance 0x0000016A9270F700 is constructed with _max_counters=249, but before crash its value has changed to 1557249601288, which causes a malloc of several terabytes.
### Steps to reproduce
Sorry, I couldn't find a surefire way to reproduce this one. It seems kind of random. It sometimes happens before the test is even started, sometimes when the test is stopped. Sometimes it doesn't happen at all. It does seem to happen more often when stopping a test in the web UI. Simply run the attached ps1 and do some stuff in the web UI.
### Environment
- OS: Windows 10.0.18362.778
- Python version: 3.6
- Locust version: 0.14.6
- Locust files : [test_locust.zip](https://github.com/locustio/locust/files/4618016/test_locust.zip)
I managed to repro the bug on two computers: my work computer and my personal computer. Both are on Windows 10/Python 3.6 that comes with VS2017, but my personal computer has a pristine Python environment where I just ran pip install locustio.
Am I doing something I'm not supposed to?
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5 import sys
6
7 from setuptools import find_packages, setup
8
9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
10
11 # parse version from locust/__init__.py
12 _version_re = re.compile(r"__version__\s+=\s+(.*)")
13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py")
14 with open(_init_file, "rb") as f:
15 version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)))
16
17 setup(
18 name="locust",
19 version=version,
20 install_requires=[
21 "gevent>=20.9.0",
22 "flask>=2.0.0",
23 "Werkzeug>=2.0.0",
24 "requests>=2.9.1",
25 "msgpack>=0.6.2",
26 "pyzmq>=16.0.2",
27 "geventhttpclient>=1.4.4",
28 "ConfigArgParse>=1.0",
29 "psutil>=5.6.7",
30 "Flask-BasicAuth>=0.2.0",
31 "Flask-Cors>=3.0.10",
32 "roundrobin>=0.0.2",
33 ],
34 test_suite="locust.test",
35 tests_require=[
36 "cryptography",
37 "mock",
38 "pyquery",
39 ],
40 extras_require={
41 ":sys_platform == 'win32'": ["pywin32"],
42 },
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
"Werkzeug>=2.0.0",
"requests>=2.9.1",
"msgpack>=0.6.2",
- "pyzmq>=16.0.2",
+ "pyzmq>=22.2.1",
"geventhttpclient>=1.4.4",
"ConfigArgParse>=1.0",
"psutil>=5.6.7",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n \"Werkzeug>=2.0.0\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n- \"pyzmq>=16.0.2\",\n+ \"pyzmq>=22.2.1\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n", "issue": "OOM error with master/slaves setup (zeromq, windows)\nHi !\r\n \r\n### Describe the bug\r\nAn out of memory error occurs with ZeroMQ trying to allocate a crazy amount of memory in decoded_allocator, sometime up to several petabytes. This might very well be a ZeroMQ bug : \r\n` OUT OF MEMORY (bundled\\zeromq\\src\\decoder_allocators.cpp:89)`\r\n \r\nI added some logs and recompiled pyzmq to check what's going on. Upon further investigation, _max_counters seems to take a crazy value at some point. See [zmq_logs.txt](https://github.com/locustio/locust/files/4618065/zmq_logs.txt)\r\nAs you can see, allocator instance 0x0000016A9270F700 is constructed with _max_counters=249, but before crash its value has changed to 1557249601288, which causes a malloc of several terabytes.\r\n \r\n \r\n### Steps to reproduce\r\nSorry, I couldn't find a surefire way to reproduce this one. It seems kind of random. It sometime happens before the test is even started, sometime when the test is stopped. Sometime it doesn't happen at all. It does seem to happen more often when stopping a test in the web UI. Simply run the ps1 attached and do some stuff in the web UI.\r\n \r\n### Environment\r\n \r\n- OS: Windows 10.0.18362.778\r\n- Python version: 3.6\r\n- Locust version: 0.14.6\r\n- Locust files : [test_locust.zip](https://github.com/locustio/locust/files/4618016/test_locust.zip)\r\n \r\nI managed to repro the bug on two computers : my work computer and my personal computer. Both are on Windows 10/Python 3.6 that comes with VS2017, but my personal computer has a pristine python environent, just ran pip install locustio.\r\n\r\nAm I doing something I'm not supposed to ?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=2.0.0\",\n \"Werkzeug>=2.0.0\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n \"Flask-Cors>=3.0.10\",\n \"roundrobin>=0.0.2\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}]} | 1,413 | 128 |
gh_patches_debug_13788 | rasdani/github-patches | git_diff | pyca__cryptography-1615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backend loading code produces a warning with the latest setuptools
The use of `load(require=False)` (specifically the `require` kwarg) is deprecated. /cc @dstufft
</issue>
<code>
[start of src/cryptography/hazmat/backends/__init__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import pkg_resources
8
9 from cryptography.hazmat.backends.multibackend import MultiBackend
10
11
12 _available_backends_list = None
13
14
15 def _available_backends():
16 global _available_backends_list
17
18 if _available_backends_list is None:
19 _available_backends_list = [
20 backend.load(require=False)
21 for backend in pkg_resources.iter_entry_points(
22 "cryptography.backends"
23 )
24 ]
25
26 return _available_backends_list
27
28 _default_backend = None
29
30
31 def default_backend():
32 global _default_backend
33
34 if _default_backend is None:
35 _default_backend = MultiBackend(_available_backends())
36
37 return _default_backend
38
[end of src/cryptography/hazmat/backends/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/backends/__init__.py b/src/cryptography/hazmat/backends/__init__.py
--- a/src/cryptography/hazmat/backends/__init__.py
+++ b/src/cryptography/hazmat/backends/__init__.py
@@ -17,8 +17,13 @@
if _available_backends_list is None:
_available_backends_list = [
- backend.load(require=False)
- for backend in pkg_resources.iter_entry_points(
+ # setuptools 11.3 deprecated support for the require parameter to
+ # load(), and introduced the new resolve() method instead.
+ # This can be removed if/when we can assume setuptools>=11.3. At
+ # some point we may wish to add a warning, to push people along,
+ # but at present this would result in too many warnings.
+ ep.resolve() if hasattr(ep, "resolve") else ep.load(require=False)
+ for ep in pkg_resources.iter_entry_points(
"cryptography.backends"
)
]
| {"golden_diff": "diff --git a/src/cryptography/hazmat/backends/__init__.py b/src/cryptography/hazmat/backends/__init__.py\n--- a/src/cryptography/hazmat/backends/__init__.py\n+++ b/src/cryptography/hazmat/backends/__init__.py\n@@ -17,8 +17,13 @@\n \n if _available_backends_list is None:\n _available_backends_list = [\n- backend.load(require=False)\n- for backend in pkg_resources.iter_entry_points(\n+ # setuptools 11.3 deprecated support for the require parameter to\n+ # load(), and introduced the new resolve() method instead.\n+ # This can be removed if/when we can assume setuptools>=11.3. At\n+ # some point we may wish to add a warning, to push people along,\n+ # but at present this would result in too many warnings.\n+ ep.resolve() if hasattr(ep, \"resolve\") else ep.load(require=False)\n+ for ep in pkg_resources.iter_entry_points(\n \"cryptography.backends\"\n )\n ]\n", "issue": "Backend loading code produces a warning with the latest setuptools\nThe use `load(require=False)` (specifically the `require` kwarg) is deprecated. /cc @dstufft \n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport pkg_resources\n\nfrom cryptography.hazmat.backends.multibackend import MultiBackend\n\n\n_available_backends_list = None\n\n\ndef _available_backends():\n global _available_backends_list\n\n if _available_backends_list is None:\n _available_backends_list = [\n backend.load(require=False)\n for backend in pkg_resources.iter_entry_points(\n \"cryptography.backends\"\n )\n ]\n\n return _available_backends_list\n\n_default_backend = None\n\n\ndef default_backend():\n global _default_backend\n\n if _default_backend is None:\n _default_backend = MultiBackend(_available_backends())\n\n return _default_backend\n", "path": "src/cryptography/hazmat/backends/__init__.py"}]} | 859 | 235 |
gh_patches_debug_580 | rasdani/github-patches | git_diff | pex-tool__pex-1191 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.26
On the docket:
+ [x] Pex requirement parsing is tripped up by files in the CWD with the same name as requirements' project names. #1188
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.25"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.25"
+__version__ = "2.1.26"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.25\"\n+__version__ = \"2.1.26\"\n", "issue": "Release 2.1.26\nOn the docket:\r\n+ [x] Pex requirement parsing is tripped up by files in the CWD with the same name as requirements' project names. #1188\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.25\"\n", "path": "pex/version.py"}]} | 631 | 97 |
gh_patches_debug_2211 | rasdani/github-patches | git_diff | rasterio__rasterio-883 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Single int indexes param in sample method
According to the docs, the `indexes` param in the `sample` method can be a "list of ints or a single int".
However, passing a single int raises this exception: `IndexError: too many indices for array`.
</issue>
<code>
[start of rasterio/sample.py]
1 # Workaround for issue #378. A pure Python generator.
2
3 def sample_gen(dataset, xy, indexes=None):
4 index = dataset.index
5 read = dataset.read
6 for x, y in xy:
7 r, c = index(x, y)
8 window = ((r, r+1), (c, c+1))
9 data = read(indexes, window=window, masked=False, boundless=True)
10 yield data[:,0,0]
11
[end of rasterio/sample.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/sample.py b/rasterio/sample.py
--- a/rasterio/sample.py
+++ b/rasterio/sample.py
@@ -3,6 +3,10 @@
def sample_gen(dataset, xy, indexes=None):
index = dataset.index
read = dataset.read
+
+ if isinstance(indexes, int):
+ indexes = [indexes]
+
for x, y in xy:
r, c = index(x, y)
window = ((r, r+1), (c, c+1))
| {"golden_diff": "diff --git a/rasterio/sample.py b/rasterio/sample.py\n--- a/rasterio/sample.py\n+++ b/rasterio/sample.py\n@@ -3,6 +3,10 @@\n def sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n+\n+ if isinstance(indexes, int):\n+ indexes = [indexes]\n+\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n", "issue": "Single int indexes param in sample method\nAccording to docs the `indexes` param in the `sample` method can be a \"list of ints or a single int\".\n\nHowever passing a single int raises this exception: `IndexError: too many indices for array`.\n\n", "before_files": [{"content": "# Workaround for issue #378. A pure Python generator.\n\ndef sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n data = read(indexes, window=window, masked=False, boundless=True)\n yield data[:,0,0]\n", "path": "rasterio/sample.py"}]} | 700 | 119 |
gh_patches_debug_13896 | rasdani/github-patches | git_diff | ARM-DOE__ACT-553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Accessor not available in dataset
I fetched the latest updates after the lazy_loading PR and ran pytest and am seeing a lot of errors with accessors not loading. Clean, QCFilter, and QCTests are no longer available in the datasets for some reason.
FAILED test_io.py::test_io_mfdataset - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_io_write - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_clean_cf_qc - AttributeError: 'Dataset' object has no attribute 'clean'
</issue>
<code>
[start of act/qc/__init__.py]
1 """
2 This module contains procedures for working with QC information
3 and for applying tests to data.
4
5 """
6
7 import lazy_loader as lazy
8
9 # We need to import clean first to register the accessor
10 from .clean import *
11
12 __getattr__, __dir__, __all__ = lazy.attach(
13 __name__,
14 submodules=[
15 'add_supplemental_qc',
16 'arm',
17 'bsrn_tests',
18 'comparison_tests',
19 'qcfilter',
20 'qctests',
21 'radiometer_tests',
22 'sp2',
23 ],
24 submod_attrs={
25 'arm': ['add_dqr_to_qc'],
26 'qcfilter': ['QCFilter'],
27 'qctests': ['QCTests'],
28 'radiometer_tests': ['fft_shading_test'],
29 'bsrn_tests': ['QCTests'],
30 'comparison_tests': ['QCTests'],
31 'add_supplemental_qc': ['read_yaml_supplemental_qc'],
32 'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],
33 },
34 )
35
[end of act/qc/__init__.py]
[start of act/__init__.py]
1 """
2 ACT: The Atmospheric Community Toolkit
3 ======================================
4
5 """
6
7 import lazy_loader as lazy
8 # No more pandas warnings
9 from pandas.plotting import register_matplotlib_converters
10
11 from . import tests
12 from ._version import get_versions
13
14 register_matplotlib_converters()
15
16 # Import the lazy loaded modules
17 submodules = [
18 'corrections',
19 'discovery',
20 'io',
21 'qc',
22 'utils',
23 'retrievals',
24 'plotting',
25 ]
26 __getattr__, __dir__, _ = lazy.attach(__name__, submodules)
27
28 # Version for source builds
29 vdict = get_versions()
30 __version__ = vdict['version']
31
[end of act/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/act/__init__.py b/act/__init__.py
--- a/act/__init__.py
+++ b/act/__init__.py
@@ -5,11 +5,13 @@
"""
import lazy_loader as lazy
+
# No more pandas warnings
from pandas.plotting import register_matplotlib_converters
from . import tests
from ._version import get_versions
+from .qc import QCFilter, QCTests, clean
register_matplotlib_converters()
diff --git a/act/qc/__init__.py b/act/qc/__init__.py
--- a/act/qc/__init__.py
+++ b/act/qc/__init__.py
@@ -8,6 +8,8 @@
# We need to import clean first to register the accessor
from .clean import *
+from .qcfilter import QCFilter
+from .qctests import QCTests
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
| {"golden_diff": "diff --git a/act/__init__.py b/act/__init__.py\n--- a/act/__init__.py\n+++ b/act/__init__.py\n@@ -5,11 +5,13 @@\n \"\"\"\n \n import lazy_loader as lazy\n+\n # No more pandas warnings\n from pandas.plotting import register_matplotlib_converters\n \n from . import tests\n from ._version import get_versions\n+from .qc import QCFilter, QCTests, clean\n \n register_matplotlib_converters()\n \ndiff --git a/act/qc/__init__.py b/act/qc/__init__.py\n--- a/act/qc/__init__.py\n+++ b/act/qc/__init__.py\n@@ -8,6 +8,8 @@\n \n # We need to import clean first to register the accessor\n from .clean import *\n+from .qcfilter import QCFilter\n+from .qctests import QCTests\n \n __getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n", "issue": "Accessor not available in dataset\nI fetched the latest updates after the lazy_loading PR and ran pytest and am seeing a lot of errors with accessors not loading. Clean, QCFilter, and QCTests are no longer available in the datasets for some reason.\r\n\r\nFAILED test_io.py::test_io_mfdataset - AttributeError: 'Dataset' object has no attribute 'clean'\r\nFAILED test_io.py::test_io_write - AttributeError: 'Dataset' object has no attribute 'clean'\r\nFAILED test_io.py::test_clean_cf_qc - AttributeError: 'Dataset' object has no attribute 'clean'\n", "before_files": [{"content": "\"\"\"\nThis module contains procedures for working with QC information\nand for applying tests to data.\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n# We need to import clean first to register the accessor\nfrom .clean import *\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=[\n 'add_supplemental_qc',\n 'arm',\n 'bsrn_tests',\n 'comparison_tests',\n 'qcfilter',\n 'qctests',\n 'radiometer_tests',\n 'sp2',\n ],\n submod_attrs={\n 'arm': ['add_dqr_to_qc'],\n 'qcfilter': ['QCFilter'],\n 'qctests': ['QCTests'],\n 'radiometer_tests': ['fft_shading_test'],\n 'bsrn_tests': ['QCTests'],\n 'comparison_tests': ['QCTests'],\n 'add_supplemental_qc': ['read_yaml_supplemental_qc'],\n 'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],\n },\n)\n", "path": "act/qc/__init__.py"}, {"content": "\"\"\"\nACT: The Atmospheric Community Toolkit\n======================================\n\n\"\"\"\n\nimport lazy_loader as lazy\n# No more pandas warnings\nfrom pandas.plotting import register_matplotlib_converters\n\nfrom . import tests\nfrom ._version import get_versions\n\nregister_matplotlib_converters()\n\n# Import the lazy loaded modules\nsubmodules = [\n 'corrections',\n 'discovery',\n 'io',\n 'qc',\n 'utils',\n 'retrievals',\n 'plotting',\n]\n__getattr__, __dir__, _ = lazy.attach(__name__, submodules)\n\n# Version for source builds\nvdict = get_versions()\n__version__ = vdict['version']\n", "path": "act/__init__.py"}]} | 1,154 | 217 |
gh_patches_debug_7560 | rasdani/github-patches | git_diff | Qiskit__qiskit-1875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Registers cannot be indexed with negative integers or slices
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
It would be nice if the behavior mimicked python lists more. e.g.
```python
q = QuantumRegister(5)
q[-1]
q[-3:-1]
etc.
```
</issue>
<code>
[start of qiskit/circuit/register.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2017, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 """
9 Base register reference object.
10 """
11 import re
12 import logging
13 import itertools
14
15 from qiskit.exceptions import QiskitError, QiskitIndexError
16
17 logger = logging.getLogger(__name__)
18
19
20 class Register:
21 """Implement a generic register."""
22
23 # Counter for the number of instances in this class.
24 instances_counter = itertools.count()
25 # Prefix to use for auto naming.
26 prefix = 'reg'
27
28 def __init__(self, size, name=None):
29 """Create a new generic register.
30 """
31
32 if name is None:
33 name = '%s%i' % (self.prefix, next(self.instances_counter))
34
35 if not isinstance(name, str):
36 raise QiskitError("The circuit name should be a string "
37 "(or None for autogenerate a name).")
38
39 test = re.compile('[a-z][a-zA-Z0-9_]*')
40 if test.match(name) is None:
41 raise QiskitError("%s is an invalid OPENQASM register name." % name)
42
43 self.name = name
44 self.size = size
45 if size <= 0:
46 raise QiskitError("register size must be positive")
47
48 def __repr__(self):
49 """Return the official string representing the register."""
50 return "%s(%d, '%s')" % (self.__class__.__qualname__,
51 self.size, self.name)
52
53 def __len__(self):
54 """Return register size"""
55 return self.size
56
57 def check_range(self, j):
58 """Check that j is a valid index into self."""
59 if isinstance(j, int):
60 if j < 0 or j >= self.size:
61 raise QiskitIndexError("register index out of range")
62 elif isinstance(j, slice):
63 if j.start < 0 or j.stop >= self.size or (j.step is not None and
64 j.step <= 0):
65 raise QiskitIndexError("register index slice out of range")
66
67 def __getitem__(self, key):
68 """
69 Arg:
70 key (int|slice|list): index of the bit/qubit to be retrieved.
71
72 Returns:
73 tuple[Register, int]: a tuple in the form `(self, key)` if key is int.
74 If key is a slice, return a `list((self,key))`.
75
76 Raises:
77 QiskitError: if the `key` is not an integer.
78 QiskitIndexError: if the `key` is not in the range
79 `(0, self.size)`.
80 """
81 if not isinstance(key, (int, slice, list)):
82 raise QiskitError("expected integer or slice index into register")
83 self.check_range(key)
84 if isinstance(key, slice):
85 return [(self, ind) for ind in range(*key.indices(len(self)))]
86 elif isinstance(key, list): # list of qubit indices
87 if max(key) < len(self):
88 return [(self, ind) for ind in key]
89 else:
90 raise QiskitError('register index out of range')
91 else:
92 return self, key
93
94 def __iter__(self):
95 """
96 Returns:
97 iterator: an iterator over the bits/qubits of the register, in the
98 form `tuple (Register, int)`.
99 """
100 return zip([self]*self.size, range(self.size))
101
102 def __eq__(self, other):
103 """Two Registers are the same if they are of the same type
104 (i.e. quantum/classical), and have the same name and size.
105
106 Args:
107 other (Register): other Register
108
109 Returns:
110 bool: are self and other equal.
111 """
112 res = False
113 if type(self) is type(other) and \
114 self.name == other.name and \
115 self.size == other.size:
116 res = True
117 return res
118
119 def __hash__(self):
120 """Make object hashable, based on the name and size to hash."""
121 return hash((type(self), self.name, self.size))
122
[end of qiskit/circuit/register.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py
--- a/qiskit/circuit/register.py
+++ b/qiskit/circuit/register.py
@@ -80,6 +80,8 @@
"""
if not isinstance(key, (int, slice, list)):
raise QiskitError("expected integer or slice index into register")
+ if isinstance(key, int) and key < 0:
+ key = self.size + key
self.check_range(key)
if isinstance(key, slice):
return [(self, ind) for ind in range(*key.indices(len(self)))]
| {"golden_diff": "diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py\n--- a/qiskit/circuit/register.py\n+++ b/qiskit/circuit/register.py\n@@ -80,6 +80,8 @@\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n+ if isinstance(key, int) and key < 0:\n+ key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n", "issue": "Registers cannot be index with negative integers or slices\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nIt would be nice if the behavior mimicked python lists more. e.g.\r\n\r\n```python\r\nq = QuantumRegister(5)\r\nq[-1]\r\nq[-3:-1]\r\netc.\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n\n if not isinstance(name, str):\n raise QiskitError(\"The circuit name should be a string \"\n \"(or None for autogenerate a name).\")\n\n test = re.compile('[a-z][a-zA-Z0-9_]*')\n if test.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise 
QiskitError('register index out of range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self]*self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}]} | 1,818 | 137 |
gh_patches_debug_29560 | rasdani/github-patches | git_diff | mlflow__mlflow-9384 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable `PT027`
### Summary
- Enable [PT027](https://beta.ruff.rs/docs/rules/pytest-unittest-raises-assertion/).
- Remove `unittest-assert-raises`.
```diff
diff --git a/pylintrc b/pylintrc
index 9148d110e..342dfc943 100644
--- a/pylintrc
+++ b/pylintrc
@@ -79,7 +79,6 @@ enable=signature-differs,
# Built-in rules
# --------------
# Custom rules
- unittest-assert-raises,
lazy-builtin-import,
useless-assignment,
diff --git a/pyproject.toml b/pyproject.toml
index 6c64df56e..120e8420c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,7 @@ select = [
"PT022",
"PT023",
"PT026",
+ "PT027",
"RUF010",
"UP004",
"UP008",
```
### Notes
- Make sure to open a PR from a **non-master** branch.
- Sign off the commit using the `-s` flag when making a commit:
```sh
git commit -s -m "..."
# ^^ make sure to use this
```
- Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR.
</issue>
<code>
[start of pylint_plugins/__init__.py]
1 from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
2 from pylint_plugins.import_checker import ImportChecker
3 from pylint_plugins.assign_checker import AssignChecker
4
5
6 def register(linter):
7 linter.register_checker(UnittestAssertRaises(linter))
8 linter.register_checker(ImportChecker(linter))
9 linter.register_checker(AssignChecker(linter))
10
[end of pylint_plugins/__init__.py]
[start of pylint_plugins/errors.py]
1 from typing import NamedTuple, Dict, Tuple
2 from functools import reduce
3
4
5 class Message(NamedTuple):
6 id: str
7 name: str
8 message: str
9 reason: str
10
11 def to_dict(self) -> Dict[str, Tuple[str, str, str]]:
12 return {self.id: (self.message, self.name, self.reason)}
13
14
15 def to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:
16 return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
17
18
19 UNITTEST_PYTEST_RAISES = Message(
20 id="W0003",
21 name="unittest-assert-raises",
22 message="Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.",
23 reason="To enforce 'pytest-raises-multiple-statements' Message.",
24 )
25
26
27 LAZY_BUILTIN_IMPORT = Message(
28 id="W0007",
29 name="lazy-builtin-import",
30 message="Import built-in module(s) (%s) at the top of the file.",
31 reason="There is no reason they should be imported inside a function.",
32 )
33
34 USELESS_ASSIGNMENT = Message(
35 id="W0008",
36 name="useless-assignment",
37 message="Useless assignment. Use immediate return instead.",
38 reason="For simplicity and readability",
39 )
40
[end of pylint_plugins/errors.py]
[start of pylint_plugins/unittest_assert_raises.py]
1 import astroid
2 from pylint.interfaces import IAstroidChecker
3 from pylint.checkers import BaseChecker
4
5 from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs
6
7
8 def _is_unittest_assert_raises(node: astroid.Call):
9 return isinstance(node.func, astroid.Attribute) and (
10 node.func.as_string() in ("self.assertRaises", "self.assertRaisesRegex")
11 )
12
13
14 class UnittestAssertRaises(BaseChecker):
15 __implements__ = IAstroidChecker
16
17 name = "unittest-assert-raises"
18 msgs = to_msgs(UNITTEST_PYTEST_RAISES)
19 priority = -1
20
21 def visit_call(self, node: astroid.Call):
22 if _is_unittest_assert_raises(node):
23 self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)
24
[end of pylint_plugins/unittest_assert_raises.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py
--- a/pylint_plugins/__init__.py
+++ b/pylint_plugins/__init__.py
@@ -1,9 +1,7 @@
-from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
from pylint_plugins.import_checker import ImportChecker
from pylint_plugins.assign_checker import AssignChecker
def register(linter):
- linter.register_checker(UnittestAssertRaises(linter))
linter.register_checker(ImportChecker(linter))
linter.register_checker(AssignChecker(linter))
diff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py
--- a/pylint_plugins/errors.py
+++ b/pylint_plugins/errors.py
@@ -16,14 +16,6 @@
return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
-UNITTEST_PYTEST_RAISES = Message(
- id="W0003",
- name="unittest-assert-raises",
- message="Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.",
- reason="To enforce 'pytest-raises-multiple-statements' Message.",
-)
-
-
LAZY_BUILTIN_IMPORT = Message(
id="W0007",
name="lazy-builtin-import",
diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py
deleted file mode 100644
--- a/pylint_plugins/unittest_assert_raises.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import astroid
-from pylint.interfaces import IAstroidChecker
-from pylint.checkers import BaseChecker
-
-from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs
-
-
-def _is_unittest_assert_raises(node: astroid.Call):
- return isinstance(node.func, astroid.Attribute) and (
- node.func.as_string() in ("self.assertRaises", "self.assertRaisesRegex")
- )
-
-
-class UnittestAssertRaises(BaseChecker):
- __implements__ = IAstroidChecker
-
- name = "unittest-assert-raises"
- msgs = to_msgs(UNITTEST_PYTEST_RAISES)
- priority = -1
-
- def visit_call(self, node: astroid.Call):
- if _is_unittest_assert_raises(node):
- self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)
| {"golden_diff": "diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py\n--- a/pylint_plugins/__init__.py\n+++ b/pylint_plugins/__init__.py\n@@ -1,9 +1,7 @@\n-from pylint_plugins.unittest_assert_raises import UnittestAssertRaises\n from pylint_plugins.import_checker import ImportChecker\n from pylint_plugins.assign_checker import AssignChecker\n \n \n def register(linter):\n- linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\ndiff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py\n--- a/pylint_plugins/errors.py\n+++ b/pylint_plugins/errors.py\n@@ -16,14 +16,6 @@\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n \n \n-UNITTEST_PYTEST_RAISES = Message(\n- id=\"W0003\",\n- name=\"unittest-assert-raises\",\n- message=\"Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.\",\n- reason=\"To enforce 'pytest-raises-multiple-statements' Message.\",\n-)\n-\n-\n LAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\ndiff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py\ndeleted file mode 100644\n--- a/pylint_plugins/unittest_assert_raises.py\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-import astroid\n-from pylint.interfaces import IAstroidChecker\n-from pylint.checkers import BaseChecker\n-\n-from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs\n-\n-\n-def _is_unittest_assert_raises(node: astroid.Call):\n- return isinstance(node.func, astroid.Attribute) and (\n- node.func.as_string() in (\"self.assertRaises\", \"self.assertRaisesRegex\")\n- )\n-\n-\n-class UnittestAssertRaises(BaseChecker):\n- __implements__ = IAstroidChecker\n-\n- name = \"unittest-assert-raises\"\n- msgs = to_msgs(UNITTEST_PYTEST_RAISES)\n- priority = -1\n-\n- def visit_call(self, node: astroid.Call):\n- if _is_unittest_assert_raises(node):\n- self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)\n", "issue": "Enable `PT027`\n### Summary\n\n- Enable [PT027](https://beta.ruff.rs/docs/rules/pytest-unittest-raises-assertion/).\r\n- Remove `unittest-assert-raises`.\r\n\r\n```diff\r\ndiff --git a/pylintrc b/pylintrc\r\nindex 9148d110e..342dfc943 100644\r\n--- a/pylintrc\r\n+++ b/pylintrc\r\n@@ -79,7 +79,6 @@ enable=signature-differs,\r\n # Built-in rules\r\n # --------------\r\n # Custom rules\r\n- unittest-assert-raises,\r\n lazy-builtin-import,\r\n useless-assignment,\r\n \r\ndiff --git a/pyproject.toml b/pyproject.toml\r\nindex 6c64df56e..120e8420c 100644\r\n--- a/pyproject.toml\r\n+++ b/pyproject.toml\r\n@@ -32,6 +32,7 @@ select = [\r\n \"PT022\",\r\n \"PT023\",\r\n \"PT026\",\r\n+ \"PT027\",\r\n \"RUF010\",\r\n \"UP004\",\r\n \"UP008\",\r\n```\n\n### Notes\n\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the `-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `#{issue_number}` (e.g. 
`#123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "from pylint_plugins.unittest_assert_raises import UnittestAssertRaises\nfrom pylint_plugins.import_checker import ImportChecker\nfrom pylint_plugins.assign_checker import AssignChecker\n\n\ndef register(linter):\n linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\n", "path": "pylint_plugins/__init__.py"}, {"content": "from typing import NamedTuple, Dict, Tuple\nfrom functools import reduce\n\n\nclass Message(NamedTuple):\n id: str\n name: str\n message: str\n reason: str\n\n def to_dict(self) -> Dict[str, Tuple[str, str, str]]:\n return {self.id: (self.message, self.name, self.reason)}\n\n\ndef to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n\n\nUNITTEST_PYTEST_RAISES = Message(\n id=\"W0003\",\n name=\"unittest-assert-raises\",\n message=\"Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.\",\n reason=\"To enforce 'pytest-raises-multiple-statements' Message.\",\n)\n\n\nLAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\n message=\"Import built-in module(s) (%s) at the top of the file.\",\n reason=\"There is no reason they should be imported inside a function.\",\n)\n\nUSELESS_ASSIGNMENT = Message(\n id=\"W0008\",\n name=\"useless-assignment\",\n message=\"Useless assignment. Use immediate return instead.\",\n reason=\"For simplicity and readability\",\n)\n", "path": "pylint_plugins/errors.py"}, {"content": "import astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\nfrom pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs\n\n\ndef _is_unittest_assert_raises(node: astroid.Call):\n return isinstance(node.func, astroid.Attribute) and (\n node.func.as_string() in (\"self.assertRaises\", \"self.assertRaisesRegex\")\n )\n\n\nclass UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"unittest-assert-raises\"\n msgs = to_msgs(UNITTEST_PYTEST_RAISES)\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if _is_unittest_assert_raises(node):\n self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)\n", "path": "pylint_plugins/unittest_assert_raises.py"}]} | 1,602 | 528 |
gh_patches_debug_42048 | rasdani/github-patches | git_diff | joke2k__faker-1243 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update python_requires in setup.py
https://github.com/joke2k/faker/blob/146f205b942d15c95160df35d3e431624697d079/setup.py#L65
Finnish IBAN should be 18 characters in length
* Faker version: 4.1.1
Finnish IBAN should be 18 characters in length. Currently the returned Finnish IBAN has 20 characters.
### Steps to reproduce
```
from faker import Faker
>>> fake = Faker('fi_FI')
>>> fin_iban = fake.iban()
>>> fin_iban
'FI807370583252728936'
>>> len(fin_iban)
20
```
### Expected behavior
```
>>> len(fin_iban)
18
```
### Actual behavior
```
>>> len(fin_iban)
20
```
</issue>
<code>
[start of faker/generator.py]
1 import random as random_module
2 import re
3
4 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
5 random = random_module.Random()
6 mod_random = random # compat with name released in 0.8
7
8
9 class Generator:
10
11 __config = {}
12
13 def __init__(self, **config):
14 self.providers = []
15 self.__config = dict(
16 list(self.__config.items()) + list(config.items()))
17 self.__random = random
18
19 def add_provider(self, provider):
20
21 if isinstance(provider, type):
22 provider = provider(self)
23
24 self.providers.insert(0, provider)
25
26 for method_name in dir(provider):
27 # skip 'private' method
28 if method_name.startswith('_'):
29 continue
30
31 faker_function = getattr(provider, method_name)
32
33 if callable(faker_function):
34 # add all faker method to generator
35 self.set_formatter(method_name, faker_function)
36
37 def provider(self, name):
38 try:
39 lst = [p for p in self.get_providers()
40 if p.__provider__ == name.lower()]
41 return lst[0]
42 except IndexError:
43 return None
44
45 def get_providers(self):
46 """Returns added providers."""
47 return self.providers
48
49 @property
50 def random(self):
51 return self.__random
52
53 @random.setter
54 def random(self, value):
55 self.__random = value
56
57 def seed_instance(self, seed=None):
58 """Calls random.seed"""
59 if self.__random == random:
60 # create per-instance random obj when first time seed_instance() is
61 # called
62 self.__random = random_module.Random()
63 self.__random.seed(seed)
64 return self
65
66 @classmethod
67 def seed(cls, seed=None):
68 random.seed(seed)
69
70 def format(self, formatter, *args, **kwargs):
71 """
72 This is a secure way to make a fake from another Provider.
73 """
74 # TODO: data export?
75 return self.get_formatter(formatter)(*args, **kwargs)
76
77 def get_formatter(self, formatter):
78 try:
79 return getattr(self, formatter)
80 except AttributeError:
81 if 'locale' in self.__config:
82 msg = 'Unknown formatter "{}" with locale "{}"'.format(
83 formatter, self.__config['locale'],
84 )
85 else:
86 raise AttributeError('Unknown formatter "{}"'.format(
87 formatter,
88 ))
89 raise AttributeError(msg)
90
91 def set_formatter(self, name, method):
92 """
93 This method adds a provider method to generator.
94 Override this method to add some decoration or logging stuff.
95 """
96 setattr(self, name, method)
97
98 def parse(self, text):
99 """
100 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
101 with the result from the token method call.
102 """
103 return _re_token.sub(self.__format_token, text)
104
105 def __format_token(self, matches):
106 formatter = list(matches.groups())
107 formatter[1] = str(self.format(formatter[1]))
108 return ''.join(formatter)
109
[end of faker/generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -1,14 +1,16 @@
import random as random_module
import re
-_re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
+_re_token = re.compile(r'\{\{\s*(\w+)(:\s*\w+?)?\s*\}\}')
random = random_module.Random()
mod_random = random # compat with name released in 0.8
class Generator:
- __config = {}
+ __config = {
+ 'arguments': {},
+ }
def __init__(self, **config):
self.providers = []
@@ -71,7 +73,6 @@
"""
This is a secure way to make a fake from another Provider.
"""
- # TODO: data export?
return self.get_formatter(formatter)(*args, **kwargs)
def get_formatter(self, formatter):
@@ -95,14 +96,84 @@
"""
setattr(self, name, method)
+ def set_arguments(self, group, argument, value=None):
+ """
+ Creates an argument group, with an individual argument or a dictionary
+ of arguments. Used with the Generator.parse method.
+
+ generator.set_arguments('small', 'max_value', 10)
+ generator.set_arguments('small', {'min_value': 5, 'max_value': 10})
+ """
+ if group not in self.__config['arguments']:
+ self.__config['arguments'][group] = {}
+
+ if isinstance(argument, dict):
+ self.__config['arguments'][group] = argument
+ elif not isinstance(argument, str):
+ raise ValueError("Arguments must be either a string or dictionary")
+ else:
+ self.__config['arguments'][group][argument] = value
+
+ def get_arguments(self, group, argument=None):
+ """
+ Get the value of an argument configured within a argument group, or
+ the entire group as a dictionary.
+
+ generator.get_arguments('small', 'max_value')
+ generator.get_arguments('small')
+ """
+ if group in self.__config['arguments'] and argument:
+ result = self.__config['arguments'][group].get(argument)
+ else:
+ result = self.__config['arguments'].get(group)
+
+ return result
+
+ def del_arguments(self, group, argument=None):
+ """
+ Delete an argument from an argument group or the entire
+ argument group.
+
+ generator.del_arguments('small')
+ generator.del_arguments('small', 'max_value')
+ """
+ if group in self.__config['arguments']:
+ if argument:
+ result = self.__config['arguments'][group].pop(argument)
+ else:
+ result = self.__config['arguments'].pop(group)
+ else:
+ result = None
+
+ return result
+
def parse(self, text):
"""
Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
- with the result from the token method call.
+ with the result from the token method call. Arguments can be
+ parsed by using an argument group. '{{ tokenName:group }}'
+
+ Example:
+
+ generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})
+ generator.set_arguments('small', 'max_value', 10)
+
+ generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')
"""
return _re_token.sub(self.__format_token, text)
def __format_token(self, matches):
- formatter = list(matches.groups())
- formatter[1] = str(self.format(formatter[1]))
- return ''.join(formatter)
+ formatter, argument_group = list(matches.groups())
+ argument_group = argument_group.lstrip(":").strip() if argument_group else ''
+
+ if argument_group:
+ try:
+ arguments = self.__config['arguments'][argument_group]
+ except KeyError:
+ raise AttributeError('Unknown argument group "{}"'.format(argument_group))
+
+ formatted = str(self.format(formatter, **arguments))
+ else:
+ formatted = str(self.format(formatter))
+
+ return ''.join(formatted)
| {"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -1,14 +1,16 @@\n import random as random_module\n import re\n \n-_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\n+_re_token = re.compile(r'\\{\\{\\s*(\\w+)(:\\s*\\w+?)?\\s*\\}\\}')\n random = random_module.Random()\n mod_random = random # compat with name released in 0.8\n \n \n class Generator:\n \n- __config = {}\n+ __config = {\n+ 'arguments': {},\n+ }\n \n def __init__(self, **config):\n self.providers = []\n@@ -71,7 +73,6 @@\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n- # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n \n def get_formatter(self, formatter):\n@@ -95,14 +96,84 @@\n \"\"\"\n setattr(self, name, method)\n \n+ def set_arguments(self, group, argument, value=None):\n+ \"\"\"\n+ Creates an argument group, with an individual argument or a dictionary\n+ of arguments. Used with the Generator.parse method.\n+\n+ generator.set_arguments('small', 'max_value', 10)\n+ generator.set_arguments('small', {'min_value': 5, 'max_value': 10})\n+ \"\"\"\n+ if group not in self.__config['arguments']:\n+ self.__config['arguments'][group] = {}\n+\n+ if isinstance(argument, dict):\n+ self.__config['arguments'][group] = argument\n+ elif not isinstance(argument, str):\n+ raise ValueError(\"Arguments must be either a string or dictionary\")\n+ else:\n+ self.__config['arguments'][group][argument] = value\n+\n+ def get_arguments(self, group, argument=None):\n+ \"\"\"\n+ Get the value of an argument configured within a argument group, or\n+ the entire group as a dictionary.\n+\n+ generator.get_arguments('small', 'max_value')\n+ generator.get_arguments('small')\n+ \"\"\"\n+ if group in self.__config['arguments'] and argument:\n+ result = self.__config['arguments'][group].get(argument)\n+ else:\n+ result = self.__config['arguments'].get(group)\n+\n+ return result\n+\n+ def del_arguments(self, group, argument=None):\n+ \"\"\"\n+ Delete an argument from an argument group or the entire\n+ argument group.\n+\n+ generator.del_arguments('small')\n+ generator.del_arguments('small', 'max_value')\n+ \"\"\"\n+ if group in self.__config['arguments']:\n+ if argument:\n+ result = self.__config['arguments'][group].pop(argument)\n+ else:\n+ result = self.__config['arguments'].pop(group)\n+ else:\n+ result = None\n+\n+ return result\n+\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n- with the result from the token method call.\n+ with the result from the token method call. Arguments can be\n+ parsed by using an argument group. 
'{{ tokenName:group }}'\n+\n+ Example:\n+\n+ generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})\n+ generator.set_arguments('small', 'max_value', 10)\n+\n+ generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n \n def __format_token(self, matches):\n- formatter = list(matches.groups())\n- formatter[1] = str(self.format(formatter[1]))\n- return ''.join(formatter)\n+ formatter, argument_group = list(matches.groups())\n+ argument_group = argument_group.lstrip(\":\").strip() if argument_group else ''\n+\n+ if argument_group:\n+ try:\n+ arguments = self.__config['arguments'][argument_group]\n+ except KeyError:\n+ raise AttributeError('Unknown argument group \"{}\"'.format(argument_group))\n+\n+ formatted = str(self.format(formatter, **arguments))\n+ else:\n+ formatted = str(self.format(formatter))\n+\n+ return ''.join(formatted)\n", "issue": "Update python_requires in setup.py\nhttps://github.com/joke2k/faker/blob/146f205b942d15c95160df35d3e431624697d079/setup.py#L65\nFinnish IBAN should be 18 characters of length\n* Faker version: 4.1.1\r\n\r\nFinnish IBAN should be 18 characters of length. Currently returned Finnish IBAN has 20 characters.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\nfrom faker import Faker\r\n>>> fake = Faker('fi_FI')\r\n>>> fin_iban = fake.iban()\r\n>>> fin_iban\r\n'FI807370583252728936'\r\n>>> len(fin_iban)\r\n20\r\n```\r\n\r\n### Expected behavior\r\n\r\n```\r\n>>> len(fin_iban)\r\n18\r\n```\r\n\r\n### Actual behavior\r\n\r\n```\r\n>>> len(fin_iban)\r\n20\r\n```\r\n\n", "before_files": [{"content": "import random as random_module\nimport re\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random_module.Random()\nmod_random = random # compat with name released in 0.8\n\n\nclass Generator:\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n self.__random = random\n\n def add_provider(self, provider):\n\n if isinstance(provider, type):\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if callable(faker_function):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return self.__random\n\n @random.setter\n def random(self, value):\n self.__random = value\n\n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n if self.__random == random:\n # create per-instance random obj when first time seed_instance() is\n # called\n self.__random = random_module.Random()\n self.__random.seed(seed)\n return self\n\n @classmethod\n def seed(cls, seed=None):\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n if 'locale' in self.__config:\n msg = 'Unknown formatter \"{}\" with locale 
\"{}\"'.format(\n formatter, self.__config['locale'],\n )\n else:\n raise AttributeError('Unknown formatter \"{}\"'.format(\n formatter,\n ))\n raise AttributeError(msg)\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = str(self.format(formatter[1]))\n return ''.join(formatter)\n", "path": "faker/generator.py"}]} | 1,628 | 961 |
gh_patches_debug_9667 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unsupported upload interface type breaks later file uploads widgets
I have a reader study with a 3d-liver-model interface (.obj kind). The following likely also extends to archive items.
We don't support uploading these via the UI.
However, when one of these interfaces is present all subsequent file upload widgets are not loaded on page: https://grand-challenge.org/reader-studies/chris-test-reader-study-the-second-coming/display-sets/create-single/

I've since removed the display set with the interface to quickly do a workaround.
Not sure if this needs to be fixed or is so corner case that we can safely ignore it.
</issue>
<code>
[start of app/grandchallenge/uploads/widgets.py]
1 from django.forms.widgets import HiddenInput, MultipleHiddenInput
2
3
4 class UserUploadWidgetMixin:
5 template_name = "uploads/widget.html"
6 input_type = None
7
8 def __init__(self, *args, allowed_file_types=None, **kwargs):
9 super().__init__(*args, **kwargs)
10 self.allowed_file_types = allowed_file_types
11
12 def get_context(self, *args, **kwargs):
13 context = super().get_context(*args, **kwargs)
14 context["widget"]["allowed_file_types"] = {
15 "id": f"{context['widget']['attrs']['id']}AllowedFileTypes",
16 "value": self.allowed_file_types,
17 }
18 return context
19
20 class Media:
21 css = {"all": ("vendored/uppy/uppy.min.css",)}
22 js = (
23 "vendored/uppy/uppy.min.js",
24 "js/user_upload.js",
25 )
26
27
28 class UserUploadSingleWidget(UserUploadWidgetMixin, HiddenInput):
29 pass
30
31
32 class UserUploadMultipleWidget(UserUploadWidgetMixin, MultipleHiddenInput):
33 def get_context(self, name, value, attrs):
34 context = super().get_context(name, value, attrs)
35 context["widget"]["attrs"]["multiple"] = True
36 return context
37
[end of app/grandchallenge/uploads/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/uploads/widgets.py b/app/grandchallenge/uploads/widgets.py
--- a/app/grandchallenge/uploads/widgets.py
+++ b/app/grandchallenge/uploads/widgets.py
@@ -11,8 +11,10 @@
def get_context(self, *args, **kwargs):
context = super().get_context(*args, **kwargs)
+ widget_id = f'X_{context["widget"]["attrs"]["id"]}'
+ context["widget"]["attrs"]["id"] = widget_id
context["widget"]["allowed_file_types"] = {
- "id": f"{context['widget']['attrs']['id']}AllowedFileTypes",
+ "id": f"{widget_id}AllowedFileTypes",
"value": self.allowed_file_types,
}
return context
| {"golden_diff": "diff --git a/app/grandchallenge/uploads/widgets.py b/app/grandchallenge/uploads/widgets.py\n--- a/app/grandchallenge/uploads/widgets.py\n+++ b/app/grandchallenge/uploads/widgets.py\n@@ -11,8 +11,10 @@\n \n def get_context(self, *args, **kwargs):\n context = super().get_context(*args, **kwargs)\n+ widget_id = f'X_{context[\"widget\"][\"attrs\"][\"id\"]}'\n+ context[\"widget\"][\"attrs\"][\"id\"] = widget_id\n context[\"widget\"][\"allowed_file_types\"] = {\n- \"id\": f\"{context['widget']['attrs']['id']}AllowedFileTypes\",\n+ \"id\": f\"{widget_id}AllowedFileTypes\",\n \"value\": self.allowed_file_types,\n }\n return context\n", "issue": "Unsupported upload interface type breaks later file uploads widgets\nI have a reader study with a 3d-liver-model interface (.obj kind). The following likely also extends to archive items.\r\n\r\nWe don't support uploading these via the UI.\r\n\r\nHowever, when one of these interfaces is present all subsequent file upload widgets are not loaded on page: https://grand-challenge.org/reader-studies/chris-test-reader-study-the-second-coming/display-sets/create-single/\r\n\r\n\r\n\r\nI've since removed the display set with the interface to quickly do a workaround.\r\n\r\nNot sure if this needs to be fixed or is so corner case that we can safely ignore it.\n", "before_files": [{"content": "from django.forms.widgets import HiddenInput, MultipleHiddenInput\n\n\nclass UserUploadWidgetMixin:\n template_name = \"uploads/widget.html\"\n input_type = None\n\n def __init__(self, *args, allowed_file_types=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.allowed_file_types = allowed_file_types\n\n def get_context(self, *args, **kwargs):\n context = super().get_context(*args, **kwargs)\n context[\"widget\"][\"allowed_file_types\"] = {\n \"id\": f\"{context['widget']['attrs']['id']}AllowedFileTypes\",\n \"value\": self.allowed_file_types,\n }\n return context\n\n class Media:\n css = {\"all\": (\"vendored/uppy/uppy.min.css\",)}\n js = (\n \"vendored/uppy/uppy.min.js\",\n \"js/user_upload.js\",\n )\n\n\nclass UserUploadSingleWidget(UserUploadWidgetMixin, HiddenInput):\n pass\n\n\nclass UserUploadMultipleWidget(UserUploadWidgetMixin, MultipleHiddenInput):\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context[\"widget\"][\"attrs\"][\"multiple\"] = True\n return context\n", "path": "app/grandchallenge/uploads/widgets.py"}]} | 1,056 | 171 |
gh_patches_debug_19141 | rasdani/github-patches | git_diff | pypi__warehouse-2849 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Blacklisting project does not purge the cache
</issue>
<code>
[start of warehouse/admin/utils.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from packaging.utils import canonicalize_name
14 from pyramid.httpexceptions import HTTPSeeOther
15
16 from warehouse.packaging.models import (
17 Project, Release, Dependency, File, Role, JournalEntry, release_classifiers
18 )
19
20
21 def confirm_project(project, request):
22 confirm = request.POST.get("confirm")
23 project_name = project.normalized_name
24 if not confirm:
25 request.session.flash(
26 "Must confirm the request.",
27 queue="error",
28 )
29 raise HTTPSeeOther(
30 request.route_path(
31 'admin.project.detail',
32 project_name=project_name
33 )
34 )
35 if canonicalize_name(confirm) != project.normalized_name:
36 request.session.flash(
37 f"{confirm!r} is not the same as {project.normalized_name!r}",
38 queue="error",
39 )
40 raise HTTPSeeOther(
41 request.route_path(
42 'admin.project.detail',
43 project_name=project_name
44 )
45 )
46
47
48 def remove_project(project, request):
49 # TODO: We don't actually delete files from the data store. We should add
50 # some kind of garbage collection at some point.
51
52 request.db.add(
53 JournalEntry(
54 name=project.name,
55 action="remove",
56 submitted_by=request.user,
57 submitted_from=request.remote_addr,
58 )
59 )
60 request.db.query(Role).filter(Role.project == project).delete()
61 request.db.query(File).filter(File.name == project.name).delete()
62 (request.db.query(Dependency).filter(Dependency.name == project.name)
63 .delete())
64 (request.db.execute(release_classifiers.delete()
65 .where(release_classifiers.c.name ==
66 project.name)))
67 request.db.query(Release).filter(Release.name == project.name).delete()
68 request.db.query(Project).filter(Project.name == project.name).delete()
69
70 request.session.flash(
71 f"Successfully deleted the project {project.name!r}.",
72 queue="success",
73 )
74
[end of warehouse/admin/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/admin/utils.py b/warehouse/admin/utils.py
--- a/warehouse/admin/utils.py
+++ b/warehouse/admin/utils.py
@@ -64,8 +64,22 @@
(request.db.execute(release_classifiers.delete()
.where(release_classifiers.c.name ==
project.name)))
- request.db.query(Release).filter(Release.name == project.name).delete()
- request.db.query(Project).filter(Project.name == project.name).delete()
+
+ # Load the following objects into the session and individually delete them
+ # so they are included in `session.deleted` and their cache keys are purged
+
+ # Delete releases first, otherwise they will get cascade-deleted by the
+ # project deletion and won't be purged
+ for release in (
+ request.db.query(Release)
+ .filter(Release.name == project.name)
+ .all()):
+ request.db.delete(release)
+
+ # Finally, delete the project
+ request.db.delete(
+ request.db.query(Project).filter(Project.name == project.name).one()
+ )
request.session.flash(
f"Successfully deleted the project {project.name!r}.",
| {"golden_diff": "diff --git a/warehouse/admin/utils.py b/warehouse/admin/utils.py\n--- a/warehouse/admin/utils.py\n+++ b/warehouse/admin/utils.py\n@@ -64,8 +64,22 @@\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n- request.db.query(Release).filter(Release.name == project.name).delete()\n- request.db.query(Project).filter(Project.name == project.name).delete()\n+\n+ # Load the following objects into the session and individually delete them\n+ # so they are included in `session.deleted` and their cache keys are purged\n+\n+ # Delete releases first, otherwise they will get cascade-deleted by the\n+ # project deletion and won't be purged\n+ for release in (\n+ request.db.query(Release)\n+ .filter(Release.name == project.name)\n+ .all()):\n+ request.db.delete(release)\n+\n+ # Finally, delete the project\n+ request.db.delete(\n+ request.db.query(Project).filter(Project.name == project.name).one()\n+ )\n \n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n", "issue": "Blacklisting project does not purge the cache\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Project, Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request):\n confirm = request.POST.get(\"confirm\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n\n\ndef remove_project(project, request):\n # TODO: We don't actually delete files from the data store. We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n request.db.query(Release).filter(Release.name == project.name).delete()\n request.db.query(Project).filter(Project.name == project.name).delete()\n\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/admin/utils.py"}]} | 1,205 | 264 |
gh_patches_debug_12370 | rasdani/github-patches | git_diff | google__openhtf-181 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add ability to pass in dut_serial via command line config
Add ability to pass in dut_serial via command line. This would be a useful feature when doing loop test
</issue>
<code>
[start of openhtf/exe/triggers.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Module for handling the triggering of test start/stop.
16
17 In order for the TestExecutor (see exe/__init__.py) to know when to start a
18 test, it needs a way to know when a DUT has been connected. Also, the test
19 can't restart until the DUT is removed and re-appears. The serial for the
20 TestRun can be read from the DUT, or from the frontend.
21
22 This module provides some built-in triggers. Custom implementations of test
23 start and stop triggers must follow the following interface:
24
25 TestStart:
26 Args:
27 None
28 Returns:
29 DUT identifier, or None if it is not known at test start time.
30
31 TestStop:
32 Args:
33 dut_id: DUT identifier of the test that is stopping.
34 Returns:
35 Blocks until the test can re-start, then returns None.
36 """
37
38 import logging
39 import time
40
41 from openhtf.io import user_input
42
43 _LOG = logging.getLogger(__name__)
44
45
46 def AutoStart(): # pylint: disable=invalid-name
47 """Start the test immediately with a dummy DUT ID."""
48 return 'UNKNOWN_DUT_ID'
49
50
51 def AutoStop(dummy_dut_id): # pylint: disable=invalid-name
52 """Stop the test immediately regardless of DUT ID given."""
53 pass
54
55
56 # pylint: disable=invalid-name
57 def PromptForTestStart(message='Provide a DUT ID in order to start the test.',
58 text_input=True):
59 """Make a test start trigger based on prompting the user for input."""
60 def trigger(): # pylint: disable=missing-docstring
61 prompt_manager = user_input.get_prompt_manager()
62 return prompt_manager.DisplayPrompt(message, text_input=text_input)
63 return trigger
64
65
66 def PromptForTestStop(message='Hit ENTER to complete the test.',
67 text_input=False):
68 """Make a test stop trigger based on prompting the user for a response."""
69 def trigger(dummy_dut_id): # pylint: disable=missing-docstring
70 prompt_manager = user_input.get_prompt_manager()
71 return prompt_manager.DisplayPrompt(message, text_input=text_input)
72 return trigger
73
[end of openhtf/exe/triggers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openhtf/exe/triggers.py b/openhtf/exe/triggers.py
--- a/openhtf/exe/triggers.py
+++ b/openhtf/exe/triggers.py
@@ -38,14 +38,20 @@
import logging
import time
+import gflags
+
from openhtf.io import user_input
-_LOG = logging.getLogger(__name__)
+gflags.DEFINE_string('dut_serial', 'UNKNOWN_DUT_ID',
+ 'DUT serial to start the test with. '
+ 'Only use if using the AutoStart trigger.')
+FLAGS = gflags.FLAGS
+_LOG = logging.getLogger(__name__)
def AutoStart(): # pylint: disable=invalid-name
"""Start the test immediately with a dummy DUT ID."""
- return 'UNKNOWN_DUT_ID'
+ return FLAGS.dut_serial
def AutoStop(dummy_dut_id): # pylint: disable=invalid-name
| {"golden_diff": "diff --git a/openhtf/exe/triggers.py b/openhtf/exe/triggers.py\n--- a/openhtf/exe/triggers.py\n+++ b/openhtf/exe/triggers.py\n@@ -38,14 +38,20 @@\n import logging\n import time\n \n+import gflags\n+\n from openhtf.io import user_input\n \n-_LOG = logging.getLogger(__name__)\n+gflags.DEFINE_string('dut_serial', 'UNKNOWN_DUT_ID',\n+ 'DUT serial to start the test with. '\n+ 'Only use if using the AutoStart trigger.')\n \n+FLAGS = gflags.FLAGS\n+_LOG = logging.getLogger(__name__)\n \n def AutoStart(): # pylint: disable=invalid-name\n \"\"\"Start the test immediately with a dummy DUT ID.\"\"\"\n- return 'UNKNOWN_DUT_ID'\n+ return FLAGS.dut_serial\n \n \n def AutoStop(dummy_dut_id): # pylint: disable=invalid-name\n", "issue": "Add ability to pass in dut_serial via command line config\nAdd ability to pass in dut_serial via command line. This would be a useful feature when doing loop test\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for handling the triggering of test start/stop.\n\nIn order for the TestExecutor (see exe/__init__.py) to know when to start a\ntest, it needs a way to know when a DUT has been connected. Also, the test\ncan't restart until the DUT is removed and re-appears. The serial for the\nTestRun can be read from the DUT, or from the frontend.\n\nThis module provides some built-in triggers. Custom implementations of test\nstart and stop triggers must follow the following interface:\n\nTestStart:\n Args:\n None\n Returns:\n DUT identifier, or None if it is not known at test start time.\n\nTestStop:\n Args:\n dut_id: DUT identifier of the test that is stopping.\n Returns:\n Blocks until the test can re-start, then returns None.\n\"\"\"\n\nimport logging\nimport time\n\nfrom openhtf.io import user_input\n\n_LOG = logging.getLogger(__name__)\n\n\ndef AutoStart(): # pylint: disable=invalid-name\n \"\"\"Start the test immediately with a dummy DUT ID.\"\"\"\n return 'UNKNOWN_DUT_ID'\n\n\ndef AutoStop(dummy_dut_id): # pylint: disable=invalid-name\n \"\"\"Stop the test immediately regardless of DUT ID given.\"\"\"\n pass\n\n\n# pylint: disable=invalid-name\ndef PromptForTestStart(message='Provide a DUT ID in order to start the test.',\n text_input=True):\n \"\"\"Make a test start trigger based on prompting the user for input.\"\"\"\n def trigger(): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n return prompt_manager.DisplayPrompt(message, text_input=text_input)\n return trigger\n\n\ndef PromptForTestStop(message='Hit ENTER to complete the test.',\n text_input=False):\n \"\"\"Make a test stop trigger based on prompting the user for a response.\"\"\"\n def trigger(dummy_dut_id): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n return prompt_manager.DisplayPrompt(message, text_input=text_input)\n return trigger\n", "path": "openhtf/exe/triggers.py"}]} | 1,302 | 208 |
gh_patches_debug_12079 | rasdani/github-patches | git_diff | pretalx__pretalx-185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restructure CSS
- [x] Use `$brand_color`
- [ ] Break SCSS up into more files
</issue>
<code>
[start of src/pretalx/common/tasks.py]
1 import hashlib
2 import os
3
4 import django_libsass
5 import sass
6 from django.conf import settings
7 from django.core.files.base import ContentFile
8 from django.core.files.storage import default_storage
9 from django.templatetags.static import static
10
11 from pretalx.celery_app import app
12 from pretalx.event.models import Event
13
14
15 @app.task()
16 def regenerate_css(event_id: int):
17 event = Event.objects.get(pk=event_id)
18 local_apps = ['cfp', 'orga']
19
20 if not event.primary_color:
21 for local_app in local_apps:
22 event.settings.delete(f'{local_app}_css_file')
23 return
24
25 for local_app in local_apps:
26 sassrules = []
27 if event.primary_color:
28 sassrules.append('$brand-primary: {};'.format(event.primary_color))
29
30 path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')
31 sassrules.append(f'@import "{path}";')
32
33 cf = dict(django_libsass.CUSTOM_FUNCTIONS)
34 cf['static'] = static
35 css = sass.compile(
36 string="\n".join(sassrules),
37 output_style='compressed',
38 custom_functions=cf
39 )
40 checksum = hashlib.sha1(css.encode('utf-8')).hexdigest()
41 fname = f'{event.slug}/{local_app}.{checksum[:16]}.css'
42
43 if event.settings.get(f'{local_app}_css_checksum', '') != checksum:
44 newname = default_storage.save(fname, ContentFile(css.encode('utf-8')))
45 event.settings.set(f'{local_app}_css_file', f'/media/{newname}')
46 event.settings.set(f'{local_app}_css_checksum', checksum)
47
[end of src/pretalx/common/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretalx/common/tasks.py b/src/pretalx/common/tasks.py
--- a/src/pretalx/common/tasks.py
+++ b/src/pretalx/common/tasks.py
@@ -23,12 +23,12 @@
return
for local_app in local_apps:
+ path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')
sassrules = []
+
if event.primary_color:
sassrules.append('$brand-primary: {};'.format(event.primary_color))
-
- path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')
- sassrules.append(f'@import "{path}";')
+ sassrules.append(f'@import "{path}";')
cf = dict(django_libsass.CUSTOM_FUNCTIONS)
cf['static'] = static
| {"golden_diff": "diff --git a/src/pretalx/common/tasks.py b/src/pretalx/common/tasks.py\n--- a/src/pretalx/common/tasks.py\n+++ b/src/pretalx/common/tasks.py\n@@ -23,12 +23,12 @@\n return\n \n for local_app in local_apps:\n+ path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')\n sassrules = []\n+\n if event.primary_color:\n sassrules.append('$brand-primary: {};'.format(event.primary_color))\n-\n- path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')\n- sassrules.append(f'@import \"{path}\";')\n+ sassrules.append(f'@import \"{path}\";')\n \n cf = dict(django_libsass.CUSTOM_FUNCTIONS)\n cf['static'] = static\n", "issue": "Restructure CSS\n- [x] Use `$brand_color`\r\n- [ ] Break SCSS up into more files \n", "before_files": [{"content": "import hashlib\nimport os\n\nimport django_libsass\nimport sass\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.templatetags.static import static\n\nfrom pretalx.celery_app import app\nfrom pretalx.event.models import Event\n\n\[email protected]()\ndef regenerate_css(event_id: int):\n event = Event.objects.get(pk=event_id)\n local_apps = ['cfp', 'orga']\n\n if not event.primary_color:\n for local_app in local_apps:\n event.settings.delete(f'{local_app}_css_file')\n return\n\n for local_app in local_apps:\n sassrules = []\n if event.primary_color:\n sassrules.append('$brand-primary: {};'.format(event.primary_color))\n\n path = os.path.join(settings.STATIC_ROOT, local_app, 'scss/main.scss')\n sassrules.append(f'@import \"{path}\";')\n\n cf = dict(django_libsass.CUSTOM_FUNCTIONS)\n cf['static'] = static\n css = sass.compile(\n string=\"\\n\".join(sassrules),\n output_style='compressed',\n custom_functions=cf\n )\n checksum = hashlib.sha1(css.encode('utf-8')).hexdigest()\n fname = f'{event.slug}/{local_app}.{checksum[:16]}.css'\n\n if event.settings.get(f'{local_app}_css_checksum', '') != checksum:\n newname = default_storage.save(fname, ContentFile(css.encode('utf-8')))\n event.settings.set(f'{local_app}_css_file', f'/media/{newname}')\n event.settings.set(f'{local_app}_css_checksum', checksum)\n", "path": "src/pretalx/common/tasks.py"}]} | 1,012 | 187 |