| problem_id (stringlengths, 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths, 13-58) | prompt (stringlengths, 1.71k-9.01k) | golden_diff (stringlengths, 151-4.94k) | verification_info (stringlengths, 465-11.3k) | num_tokens_prompt (int64, 557-2.05k) | num_tokens_diff (int64, 48-1.02k) |
|---|---|---|---|---|---|---|---|---|
gh_patches_debug_15830 | rasdani/github-patches | git_diff | Parsl__parsl-666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "all" install option
We have several subpackages now. I think it would be good if we had an option to install all subpackages.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 setup(
10 name='parsl',
11 version=VERSION,
12 description='Simple data dependent workflows in Python',
13 long_description='Simple parallel workflows system for Python',
14 url='https://github.com/Parsl/parsl',
15 author='The Parsl Team',
16 author_email='[email protected]',
17 license='Apache 2.0',
18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
19 package_data={'': ['LICENSE']},
20 packages=find_packages(),
21 install_requires=install_requires,
22 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
23 'parsl/executors/extreme_scale/mpi_worker_pool.py'],
24 extras_require = {
25 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],
26 'aws' : ['boto3'],
27 'jetstream' : ['python-novaclient'],
28 'extreme_scale' : ['mpi4py'],
29 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
30 'google_cloud' : ['google-auth', 'google-api-python-client']
31 },
32 classifiers = [
33 # Maturity
34 'Development Status :: 3 - Alpha',
35 # Intended audience
36 'Intended Audience :: Developers',
37 # Licence, must match with licence above
38 'License :: OSI Approved :: Apache Software License',
39 # Python versions supported
40 'Programming Language :: Python :: 3.5',
41 'Programming Language :: Python :: 3.6',
42 ],
43 keywords=['Workflows', 'Scientific computing'],
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,10 +24,17 @@
extras_require = {
'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],
'aws' : ['boto3'],
- 'jetstream' : ['python-novaclient'],
+ # Jetstream is deprecated since the interface has not been maintained.
+ # 'jetstream' : ['python-novaclient'],
'extreme_scale' : ['mpi4py'],
'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
- 'google_cloud' : ['google-auth', 'google-api-python-client']
+ 'google_cloud' : ['google-auth', 'google-api-python-client'],
+ 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',
+ 'boto3',
+ 'mpi4py',
+ 'nbsphinx', 'sphinx_rtd_theme',
+ 'google-auth', 'google-api-python-client']
+
},
classifiers = [
# Maturity
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,10 +24,17 @@\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n- 'jetstream' : ['python-novaclient'],\n+ # Jetstream is deprecated since the interface has not been maintained.\n+ # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n- 'google_cloud' : ['google-auth', 'google-api-python-client']\n+ 'google_cloud' : ['google-auth', 'google-api-python-client'],\n+ 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n+ 'boto3',\n+ 'mpi4py',\n+ 'nbsphinx', 'sphinx_rtd_theme',\n+ 'google-auth', 'google-api-python-client']\n+\n },\n classifiers = [\n # Maturity\n", "issue": "Add \"all\" install option\nWe have several subpackages now. I think it would be good if we had an option to install all subpackages. \n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py'],\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]} | 1,029 | 245 |
gh_patches_debug_15111 | rasdani/github-patches | git_diff | wagtail__wagtail-2621 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add default icon for TableBlock
As per https://github.com/torchbox/wagtail/pull/1705#issuecomment-216053655. Best to do this after #2417 is merged, to avoid conflicts.
</issue>
<code>
[start of wagtail/contrib/table_block/fields.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import json
4
5 from django import forms
6 from django.template.loader import render_to_string
7 from django.utils import translation
8 from django.utils.functional import cached_property
9
10 from wagtail.utils.widgets import WidgetWithScript
11 from wagtail.wagtailcore.blocks import FieldBlock
12
13
14 class TableInput(WidgetWithScript, forms.HiddenInput):
15
16 def __init__(self, table_options=None, attrs=None):
17 self.table_options = table_options
18 super(TableInput, self).__init__(attrs=attrs)
19
20 def render(self, name, value, attrs=None):
21 original_field_html = super(TableInput, self).render(name, value, attrs)
22 return render_to_string("table_block/widgets/table.html", {
23 'original_field_html': original_field_html,
24 'attrs': attrs,
25 'value': value,
26 })
27
28 def render_js_init(self, id_, name, value):
29 return "initTable({0}, {1});".format(json.dumps(id_), json.dumps(self.table_options))
30
31
32
33 class TableBlock(FieldBlock):
34 def __init__(self, required=True, help_text=None, table_options=None, **kwargs):
35 # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality
36 # natively (via 'label' and 'default')
37 # CharField's 'max_length' and 'min_length' parameters are not exposed as table data needs to
38 # have arbitrary length
39 # table_options can contain any valid handsontable options: http://docs.handsontable.com/0.18.0/Options.html
40 self.field_options = {'required': required, 'help_text': help_text}
41
42 language = translation.get_language()
43 if language is not None and len(language) > 2:
44 language = language[:2]
45
46 default_table_options = {
47 'minSpareRows': 0,
48 'startRows': 3,
49 'startCols': 3,
50 'colHeaders': False,
51 'rowHeaders': False,
52 'contextMenu': True,
53 'editor': 'text',
54 'stretchH': 'all',
55 'height': 108,
56 'language': language,
57 'renderer': 'text',
58 'autoColumnSize': False,
59 }
60 if table_options is not None:
61 default_table_options.update(table_options)
62 self.table_options = default_table_options
63 super(TableBlock, self).__init__(**kwargs)
64
65 @cached_property
66 def field(self):
67 return forms.CharField(widget=TableInput(table_options=self.table_options), **self.field_options)
68
69 def value_from_form(self, value):
70 return json.loads(value)
71
72 def value_for_form(self, value):
73 return json.dumps(value)
74
75 def is_html_renderer(self):
76 return self.table_options['renderer'] == 'html'
77
78 def render(self, value):
79 template = getattr(self.meta, 'template', None)
80 if template and value:
81 table_header = value['data'][0] if value.get('data', None) and len(value['data']) > 0 and value.get('first_row_is_table_header', False) else None
82 first_col_is_header = value.get('first_col_is_header', False)
83 context = {
84 'self': value,
85 self.TEMPLATE_VAR: value,
86 'table_header': table_header,
87 'first_col_is_header': first_col_is_header,
88 'html_renderer': self.is_html_renderer(),
89 'data': value['data'][1:] if table_header else value.get('data', [])
90 }
91 return render_to_string(template, context)
92 else:
93 return self.render_basic(value)
94
95 @property
96 def media(self):
97 return forms.Media(
98 css={'all': ['table_block/css/vendor/handsontable-0.24.2.full.min.css']},
99 js=['table_block/js/vendor/handsontable-0.24.2.full.min.js', 'table_block/js/table.js']
100 )
101
102 class Meta:
103 default = None
104 template = 'table_block/blocks/table.html'
105
[end of wagtail/contrib/table_block/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/contrib/table_block/fields.py b/wagtail/contrib/table_block/fields.py
--- a/wagtail/contrib/table_block/fields.py
+++ b/wagtail/contrib/table_block/fields.py
@@ -29,7 +29,6 @@
return "initTable({0}, {1});".format(json.dumps(id_), json.dumps(self.table_options))
-
class TableBlock(FieldBlock):
def __init__(self, required=True, help_text=None, table_options=None, **kwargs):
# CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality
@@ -102,3 +101,4 @@
class Meta:
default = None
template = 'table_block/blocks/table.html'
+ icon = "table"
| {"golden_diff": "diff --git a/wagtail/contrib/table_block/fields.py b/wagtail/contrib/table_block/fields.py\n--- a/wagtail/contrib/table_block/fields.py\n+++ b/wagtail/contrib/table_block/fields.py\n@@ -29,7 +29,6 @@\n return \"initTable({0}, {1});\".format(json.dumps(id_), json.dumps(self.table_options))\n \n \n-\n class TableBlock(FieldBlock):\n def __init__(self, required=True, help_text=None, table_options=None, **kwargs):\n # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality\n@@ -102,3 +101,4 @@\n class Meta:\n default = None\n template = 'table_block/blocks/table.html'\n+ icon = \"table\"\n", "issue": "Add default icon for TableBlock\nAs per https://github.com/torchbox/wagtail/pull/1705#issuecomment-216053655. Best to do this after #2417 is merged, to avoid conflicts.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django import forms\nfrom django.template.loader import render_to_string\nfrom django.utils import translation\nfrom django.utils.functional import cached_property\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.blocks import FieldBlock\n\n\nclass TableInput(WidgetWithScript, forms.HiddenInput):\n\n def __init__(self, table_options=None, attrs=None):\n self.table_options = table_options\n super(TableInput, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n original_field_html = super(TableInput, self).render(name, value, attrs)\n return render_to_string(\"table_block/widgets/table.html\", {\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n })\n\n def render_js_init(self, id_, name, value):\n return \"initTable({0}, {1});\".format(json.dumps(id_), json.dumps(self.table_options))\n\n\n\nclass TableBlock(FieldBlock):\n def __init__(self, required=True, help_text=None, table_options=None, **kwargs):\n # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality\n # natively (via 'label' and 'default')\n # CharField's 'max_length' and 'min_length' parameters are not exposed as table data needs to\n # have arbitrary length\n # table_options can contain any valid handsontable options: http://docs.handsontable.com/0.18.0/Options.html\n self.field_options = {'required': required, 'help_text': help_text}\n\n language = translation.get_language()\n if language is not None and len(language) > 2:\n language = language[:2]\n\n default_table_options = {\n 'minSpareRows': 0,\n 'startRows': 3,\n 'startCols': 3,\n 'colHeaders': False,\n 'rowHeaders': False,\n 'contextMenu': True,\n 'editor': 'text',\n 'stretchH': 'all',\n 'height': 108,\n 'language': language,\n 'renderer': 'text',\n 'autoColumnSize': False,\n }\n if table_options is not None:\n default_table_options.update(table_options)\n self.table_options = default_table_options\n super(TableBlock, self).__init__(**kwargs)\n\n @cached_property\n def field(self):\n return forms.CharField(widget=TableInput(table_options=self.table_options), **self.field_options)\n\n def value_from_form(self, value):\n return json.loads(value)\n\n def value_for_form(self, value):\n return json.dumps(value)\n\n def is_html_renderer(self):\n return self.table_options['renderer'] == 'html'\n\n def render(self, value):\n template = getattr(self.meta, 'template', None)\n if template and value:\n table_header = value['data'][0] if value.get('data', None) and len(value['data']) > 0 and 
value.get('first_row_is_table_header', False) else None\n first_col_is_header = value.get('first_col_is_header', False)\n context = {\n 'self': value,\n self.TEMPLATE_VAR: value,\n 'table_header': table_header,\n 'first_col_is_header': first_col_is_header,\n 'html_renderer': self.is_html_renderer(),\n 'data': value['data'][1:] if table_header else value.get('data', [])\n }\n return render_to_string(template, context)\n else:\n return self.render_basic(value)\n\n @property\n def media(self):\n return forms.Media(\n css={'all': ['table_block/css/vendor/handsontable-0.24.2.full.min.css']},\n js=['table_block/js/vendor/handsontable-0.24.2.full.min.js', 'table_block/js/table.js']\n )\n\n class Meta:\n default = None\n template = 'table_block/blocks/table.html'\n", "path": "wagtail/contrib/table_block/fields.py"}]} | 1,689 | 181 |
gh_patches_debug_23780 | rasdani/github-patches | git_diff | pypa__setuptools-2256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unexpected behavior when distutils is invoked before Setuptools
TL;DR We are forced to use distutils because setuptools has broken symlink processing and this causes `distutils.errors.DistutilsClassError: command class <class '__main__.SDistCommand'> must subclass Command`.
It works with `setuptools<48` and the changelog doesn't document any breaking behaviors for this version.
Repro:
```console
$ git clone https://github.com/ansible/ansible.git
$ cd ansible
$ pip install -U 'setuptools>=48'
$ python setup.py sdist
```
(tried under Python 3.8)
Ref: https://github.com/ansible/ansible/issues/70456
</issue>
<code>
[start of setuptools/distutils_patch.py]
1 """
2 Ensure that the local copy of distutils is preferred over stdlib.
3
4 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
5 for more motivation.
6 """
7
8 import sys
9 import re
10 import os
11 import importlib
12 import warnings
13
14
15 def clear_distutils():
16 if 'distutils' not in sys.modules:
17 return
18 warnings.warn("Setuptools is replacing distutils")
19 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
20 for name in mods:
21 del sys.modules[name]
22
23
24 def enabled():
25 """
26 Allow selection of distutils by environment variable.
27 """
28 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
29 return which == 'local'
30
31
32 def ensure_local_distutils():
33 clear_distutils()
34 distutils = importlib.import_module('setuptools._distutils')
35 distutils.__name__ = 'distutils'
36 sys.modules['distutils'] = distutils
37
38 # sanity check that submodules load as expected
39 core = importlib.import_module('distutils.core')
40 assert '_distutils' in core.__file__, core.__file__
41
42
43 if enabled():
44 ensure_local_distutils()
45
[end of setuptools/distutils_patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/distutils_patch.py b/setuptools/distutils_patch.py
--- a/setuptools/distutils_patch.py
+++ b/setuptools/distutils_patch.py
@@ -12,10 +12,26 @@
import warnings
+is_pypy = '__pypy__' in sys.builtin_module_names
+
+
+def warn_distutils_present():
+ if 'distutils' not in sys.modules:
+ return
+ if is_pypy and sys.version_info < (3, 7):
+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
+ return
+ warnings.warn(
+ "Distutils was imported before Setuptools. This usage is discouraged "
+ "and may exhibit undesirable behaviors or errors. Please use "
+ "Setuptools' objects directly or at least import Setuptools first.")
+
+
def clear_distutils():
if 'distutils' not in sys.modules:
return
- warnings.warn("Setuptools is replacing distutils")
+ warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
@@ -40,5 +56,6 @@
assert '_distutils' in core.__file__, core.__file__
+warn_distutils_present()
if enabled():
ensure_local_distutils()
| {"golden_diff": "diff --git a/setuptools/distutils_patch.py b/setuptools/distutils_patch.py\n--- a/setuptools/distutils_patch.py\n+++ b/setuptools/distutils_patch.py\n@@ -12,10 +12,26 @@\n import warnings\n \n \n+is_pypy = '__pypy__' in sys.builtin_module_names\n+\n+\n+def warn_distutils_present():\n+ if 'distutils' not in sys.modules:\n+ return\n+ if is_pypy and sys.version_info < (3, 7):\n+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n+ return\n+ warnings.warn(\n+ \"Distutils was imported before Setuptools. This usage is discouraged \"\n+ \"and may exhibit undesirable behaviors or errors. Please use \"\n+ \"Setuptools' objects directly or at least import Setuptools first.\")\n+\n+\n def clear_distutils():\n if 'distutils' not in sys.modules:\n return\n- warnings.warn(\"Setuptools is replacing distutils\")\n+ warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n@@ -40,5 +56,6 @@\n assert '_distutils' in core.__file__, core.__file__\n \n \n+warn_distutils_present()\n if enabled():\n ensure_local_distutils()\n", "issue": "Unexpected behavior when distutils is invoked before Setuptools\nTL;DR We are forced to use distutils because setuptools has broken symlink processing and this causes `distutils.errors.DistutilsClassError: command class <class '__main__.SDistCommand'> must subclass Command`.\r\n\r\nIt works with `setuptools<48` and the changelog doesn't document any breaking behaviors for this version.\r\n\r\nRepro:\r\n```console\r\n$ git clone https://github.com/ansible/ansible.git\r\n$ cd ansible\r\n$ pip install -U 'setuptools>=48'\r\n$ python setup.py sdist\r\n```\r\n(tried under Python 3.8)\r\n\r\nRef: https://github.com/ansible/ansible/issues/70456\n", "before_files": [{"content": "\"\"\"\nEnsure that the local copy of distutils is preferred over stdlib.\n\nSee https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\nfor more motivation.\n\"\"\"\n\nimport sys\nimport re\nimport os\nimport importlib\nimport warnings\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\nif enabled():\n ensure_local_distutils()\n", "path": "setuptools/distutils_patch.py"}]} | 1,046 | 379 |
gh_patches_debug_3506 | rasdani/github-patches | git_diff | vega__altair-692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Two renderers with same name
Working with a fresh Anaconda installation of Jupyter 3.6. Followed Altair Notebook installation instructions. Basic example (flower petals scatter plot) rendered but with JSON underneath graph (issue #634). I thought this might be due to having selected the wrong renderer., so I went to list the available renderers.
`alt.renderers.names()`
returns
`['default', 'json', 'notebook', 'notebook']`
Secretly hoping the second `notebook` renderer solves #634. In any case, I think you shouldn't be able to have two renderers with the same name.
(hs teacher, maybe I'm missing something)
</issue>
<code>
[start of altair/utils/plugin_registry.py]
1 from typing import Callable, Generic, List, TypeVar, Union, cast
2
3 import entrypoints
4
5
6 PluginType = TypeVar('PluginType')
7
8
9 class PluginRegistry(Generic[PluginType]):
10 """A registry for plugins.
11
12 This is a plugin registry that allows plugins to be loaded/registered
13 in two ways:
14
15 1. Through an explicit call to ``.register(name, value)``.
16 2. By looking for other Python packages that are installed and provide
17 a setuptools entry point group.
18
19 When you create an instance of this class, provide the name of the
20 entry point group to use::
21
22 reg = PluginRegister('my_entrypoint_group')
23
24 """
25 # this is a mapping of name to error message to allow custom error messages
26 # in case an entrypoint is not found
27 entrypoint_err_messages = {}
28
29 def __init__(self, entry_point_group: str = '', plugin_type=object) -> None:
30 """Create a PluginRegistry for a named entry point group.
31
32 Parameters
33 ==========
34 entry_point_group: str
35 The name of the entry point group.
36 plugin_type: object
37 A type that will optionally be used for runtime type checking of
38 loaded plugins using isinstance.
39 """
40 self.entry_point_group = entry_point_group
41 self.plugin_type = plugin_type
42 self._active = None # type: None
43 self._active_name = '' # type: str
44 self._plugins = {} # type: dict
45 self._options = {} # type: dict
46
47 def register(self, name: str, value: Union[PluginType,None]) -> PluginType:
48 """Register a plugin by name and value.
49
50 This method is used for explicit registration of a plugin and shouldn't be
51 used to manage entry point managed plugins, which are auto-loaded.
52
53 Parameters
54 ==========
55 name: str
56 The name of the plugin.
57 value: PluginType or None
58 The actual plugin object to register or None to unregister that plugin.
59
60 Returns
61 =======
62 plugin: PluginType
63 The plugin that was registered or unregistered.
64 """
65 if value is None and name in self._plugins:
66 return self._plugins.pop(name)
67 else:
68 assert isinstance(value, self.plugin_type)
69 self._plugins[name] = value
70 return value
71
72
73 def names(self) -> List[str]:
74 """List the names of the registered and entry points plugins."""
75 exts = list(self._plugins.keys())
76 more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]
77 exts.extend(more_exts)
78 return exts
79
80
81 def enable(self, name: str) -> None:
82 """Enable a plugin by name."""
83 if name not in self._plugins:
84 try:
85 ep = entrypoints.get_single(self.entry_point_group, name)
86 except entrypoints.NoSuchEntryPoint as err:
87 if name in self.entrypoint_err_messages:
88 raise ValueError(self.entrypoint_err_messages[name])
89 else:
90 raise
91 value = cast(PluginType, ep.load())
92 assert isinstance(value, self.plugin_type)
93 self.register(name, value)
94 self._active_name = name
95 self._active = self._plugins[name]
96
97 @property
98 def active(self) -> str:
99 """Return the name of the currently active plugin"""
100 return self._active_name
101
102 def get(self) -> PluginType:
103 """Return the currently active plugin."""
104 return self._active
105
106 def __repr__(self) -> str:
107 return ("{0}(active={1!r}, registered={2!r})"
108 "".format(self.__class__.__name__,
109 self._active_name,
110 list(self.names())))
111
[end of altair/utils/plugin_registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py
--- a/altair/utils/plugin_registry.py
+++ b/altair/utils/plugin_registry.py
@@ -75,7 +75,7 @@
exts = list(self._plugins.keys())
more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]
exts.extend(more_exts)
- return exts
+ return sorted(set(exts))
def enable(self, name: str) -> None:
| {"golden_diff": "diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py\n--- a/altair/utils/plugin_registry.py\n+++ b/altair/utils/plugin_registry.py\n@@ -75,7 +75,7 @@\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]\n exts.extend(more_exts)\n- return exts\n+ return sorted(set(exts))\n \n \n def enable(self, name: str) -> None:\n", "issue": "Two renderers with same name\nWorking with a fresh Anaconda installation of Jupyter 3.6. Followed Altair Notebook installation instructions. Basic example (flower petals scatter plot) rendered but with JSON underneath graph (issue #634). I thought this might be due to having selected the wrong renderer., so I went to list the available renderers. \r\n\r\n`alt.renderers.names()`\r\nreturns\r\n`['default', 'json', 'notebook', 'notebook']`\r\n\r\nSecretly hoping the second `notebook` renderer solves #634. In any case, I think you shouldn't be able to have two renderers with the same name. \r\n\r\n(hs teacher, maybe I'm missing something)\n", "before_files": [{"content": "from typing import Callable, Generic, List, TypeVar, Union, cast\n\nimport entrypoints\n\n\nPluginType = TypeVar('PluginType')\n\n\nclass PluginRegistry(Generic[PluginType]):\n \"\"\"A registry for plugins.\n\n This is a plugin registry that allows plugins to be loaded/registered\n in two ways:\n\n 1. Through an explicit call to ``.register(name, value)``.\n 2. By looking for other Python packages that are installed and provide\n a setuptools entry point group.\n\n When you create an instance of this class, provide the name of the\n entry point group to use::\n\n reg = PluginRegister('my_entrypoint_group')\n\n \"\"\"\n # this is a mapping of name to error message to allow custom error messages\n # in case an entrypoint is not found\n entrypoint_err_messages = {}\n\n def __init__(self, entry_point_group: str = '', plugin_type=object) -> None:\n \"\"\"Create a PluginRegistry for a named entry point group.\n\n Parameters\n ==========\n entry_point_group: str\n The name of the entry point group.\n plugin_type: object\n A type that will optionally be used for runtime type checking of\n loaded plugins using isinstance.\n \"\"\"\n self.entry_point_group = entry_point_group\n self.plugin_type = plugin_type\n self._active = None # type: None\n self._active_name = '' # type: str\n self._plugins = {} # type: dict\n self._options = {} # type: dict\n\n def register(self, name: str, value: Union[PluginType,None]) -> PluginType:\n \"\"\"Register a plugin by name and value.\n\n This method is used for explicit registration of a plugin and shouldn't be\n used to manage entry point managed plugins, which are auto-loaded.\n\n Parameters\n ==========\n name: str\n The name of the plugin.\n value: PluginType or None\n The actual plugin object to register or None to unregister that plugin.\n\n Returns\n =======\n plugin: PluginType\n The plugin that was registered or unregistered.\n \"\"\"\n if value is None and name in self._plugins:\n return self._plugins.pop(name)\n else:\n assert isinstance(value, self.plugin_type)\n self._plugins[name] = value\n return value\n\n\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]\n exts.extend(more_exts)\n return exts\n\n\n def enable(self, name: str) -> None:\n \"\"\"Enable a plugin by name.\"\"\"\n if 
name not in self._plugins:\n try:\n ep = entrypoints.get_single(self.entry_point_group, name)\n except entrypoints.NoSuchEntryPoint as err:\n if name in self.entrypoint_err_messages:\n raise ValueError(self.entrypoint_err_messages[name])\n else:\n raise\n value = cast(PluginType, ep.load())\n assert isinstance(value, self.plugin_type)\n self.register(name, value)\n self._active_name = name\n self._active = self._plugins[name]\n\n @property\n def active(self) -> str:\n \"\"\"Return the name of the currently active plugin\"\"\"\n return self._active_name\n\n def get(self) -> PluginType:\n \"\"\"Return the currently active plugin.\"\"\"\n return self._active\n\n def __repr__(self) -> str:\n return (\"{0}(active={1!r}, registered={2!r})\"\n \"\".format(self.__class__.__name__,\n self._active_name,\n list(self.names())))\n", "path": "altair/utils/plugin_registry.py"}]} | 1,733 | 120 |
gh_patches_debug_51314 | rasdani/github-patches | git_diff | scikit-image__scikit-image-2643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
module 'skimage.filters' has no attribute 'denoise_tv_chambolle'
There are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48)
Thus `from skimage.filters import *` gives:
```
AttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle'
```
</issue>
<code>
[start of skimage/filters/__init__.py]
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import gaussian
3 from .edges import (sobel, sobel_h, sobel_v,
4 scharr, scharr_h, scharr_v,
5 prewitt, prewitt_h, prewitt_v,
6 roberts, roberts_pos_diag, roberts_neg_diag,
7 laplace)
8 from ._rank_order import rank_order
9 from ._gabor import gabor_kernel, gabor
10 from ._frangi import frangi, hessian
11 from .thresholding import (threshold_local,
12 threshold_adaptive, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 try_all_threshold)
17 from . import rank
18 from .rank import median
19
20 from .._shared.utils import deprecated, copy_func
21
22
23 gaussian_filter = copy_func(gaussian, name='gaussian_filter')
24 gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)
25 gabor_filter = copy_func(gabor, name='gabor_filter')
26 gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)
27
28 __all__ = ['inverse',
29 'wiener',
30 'LPIFilter2D',
31 'gaussian',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'denoise_tv_chambolle',
47 'denoise_bilateral',
48 'denoise_tv_bregman',
49 'rank_order',
50 'gabor_kernel',
51 'gabor',
52 'try_all_threshold',
53 'frangi',
54 'hessian',
55 'threshold_adaptive',
56 'threshold_otsu',
57 'threshold_yen',
58 'threshold_isodata',
59 'threshold_li',
60 'threshold_minimum',
61 'threshold_mean',
62 'threshold_niblack',
63 'threshold_sauvola',
64 'threshold_triangle',
65 'rank']
66
[end of skimage/filters/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,9 +43,6 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
- 'denoise_tv_chambolle',
- 'denoise_bilateral',
- 'denoise_tv_bregman',
'rank_order',
'gabor_kernel',
'gabor',
| {"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,9 +43,6 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n- 'denoise_tv_chambolle',\n- 'denoise_bilateral',\n- 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "module 'skimage.filters' has no attribute 'denoise_tv_chambolle' \nThere are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48)\r\n\r\nThus `from skimage.filters import *` gives:\r\n\r\n```\r\nAttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle'\r\n```\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom ._frangi import frangi, hessian\nfrom .thresholding import (threshold_local,\n threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n try_all_threshold)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated, copy_func\n\n\ngaussian_filter = copy_func(gaussian, name='gaussian_filter')\ngaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)\ngabor_filter = copy_func(gabor, name='gabor_filter')\ngabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'frangi',\n 'hessian',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'rank']\n", "path": "skimage/filters/__init__.py"}]} | 1,250 | 126 |
gh_patches_debug_32234 | rasdani/github-patches | git_diff | modin-project__modin-2252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[REFACTOR]: remove dead code in modin/pandas/index folder
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Modin installed from (source or binary)**:
- **Modin version**:
- **Python version**:
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
</issue>
<code>
[start of modin/pandas/index/partitioned_index.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14
15 class PartitionedIndex(object):
16
17 _index_lengths_cache = None
18
19 def _get_partition_lengths(self):
20 if self._index_lengths_cache is None:
21 self._index_lengths_cache = [
22 obj.apply(len).get() for obj in self.index_partitions[:0]
23 ]
24 return self._index_lengths_cache
25
26 def _set_partition_lengths(self, new_value):
27 self._partition_length_cache = new_value
28
29 index_lengths = property(_get_partition_lengths, _set_partition_lengths)
30
31 def __getitem__(self, key):
32 cls = type(self)
33 return cls(self.index_partitions[key])
34
35
36 class RayPartitionedIndex(PartitionedIndex):
37 def __init__(self, index_partitions):
38 self.index_partitions = index_partitions
39
[end of modin/pandas/index/partitioned_index.py]
[start of modin/pandas/index/__init__.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
[end of modin/pandas/index/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py
deleted file mode 100644
--- a/modin/pandas/index/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Licensed to Modin Development Team under one or more contributor license agreements.
-# See the NOTICE file distributed with this work for additional information regarding
-# copyright ownership. The Modin Development Team licenses this file to you under the
-# Apache License, Version 2.0 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
diff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py
deleted file mode 100644
--- a/modin/pandas/index/partitioned_index.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to Modin Development Team under one or more contributor license agreements.
-# See the NOTICE file distributed with this work for additional information regarding
-# copyright ownership. The Modin Development Team licenses this file to you under the
-# Apache License, Version 2.0 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-
-
-class PartitionedIndex(object):
-
- _index_lengths_cache = None
-
- def _get_partition_lengths(self):
- if self._index_lengths_cache is None:
- self._index_lengths_cache = [
- obj.apply(len).get() for obj in self.index_partitions[:0]
- ]
- return self._index_lengths_cache
-
- def _set_partition_lengths(self, new_value):
- self._partition_length_cache = new_value
-
- index_lengths = property(_get_partition_lengths, _set_partition_lengths)
-
- def __getitem__(self, key):
- cls = type(self)
- return cls(self.index_partitions[key])
-
-
-class RayPartitionedIndex(PartitionedIndex):
- def __init__(self, index_partitions):
- self.index_partitions = index_partitions
| {"golden_diff": "diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py\ndeleted file mode 100644\n--- a/modin/pandas/index/__init__.py\n+++ /dev/null\n@@ -1,12 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\ndiff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py\ndeleted file mode 100644\n--- a/modin/pandas/index/partitioned_index.py\n+++ /dev/null\n@@ -1,38 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\n-\n-\n-class PartitionedIndex(object):\n-\n- _index_lengths_cache = None\n-\n- def _get_partition_lengths(self):\n- if self._index_lengths_cache is None:\n- self._index_lengths_cache = [\n- obj.apply(len).get() for obj in self.index_partitions[:0]\n- ]\n- return self._index_lengths_cache\n-\n- def _set_partition_lengths(self, new_value):\n- self._partition_length_cache = new_value\n-\n- index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n-\n- def __getitem__(self, key):\n- cls = type(self)\n- return cls(self.index_partitions[key])\n-\n-\n-class RayPartitionedIndex(PartitionedIndex):\n- def __init__(self, index_partitions):\n- self.index_partitions = index_partitions\n", "issue": "[REFACTOR]: remove dead code in modin/pandas/index folder\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Modin installed from (source or binary)**:\r\n- **Modin version**:\r\n- **Python version**:\r\n- **Exact command to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. 
Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\nclass PartitionedIndex(object):\n\n _index_lengths_cache = None\n\n def _get_partition_lengths(self):\n if self._index_lengths_cache is None:\n self._index_lengths_cache = [\n obj.apply(len).get() for obj in self.index_partitions[:0]\n ]\n return self._index_lengths_cache\n\n def _set_partition_lengths(self, new_value):\n self._partition_length_cache = new_value\n\n index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n\n def __getitem__(self, key):\n cls = type(self)\n return cls(self.index_partitions[key])\n\n\nclass RayPartitionedIndex(PartitionedIndex):\n def __init__(self, index_partitions):\n self.index_partitions = index_partitions\n", "path": "modin/pandas/index/partitioned_index.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n", "path": "modin/pandas/index/__init__.py"}]} | 1,359 | 660 |
gh_patches_debug_9364 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-2258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Azure VM - We are not getting power state
The VM data we are getting back does not tell you if the VM is running or not.
I think perhaps you have to tell the `list_all` api what you want - we want `instanceview`
https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview
Not sure how this happens via SDK.
</issue>
<code>
[start of tools/c7n_azure/c7n_azure/resources/vm.py]
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from c7n_azure.query import QueryResourceManager
16 from c7n_azure.provider import resources
17 from c7n.filters.core import ValueFilter, type_schema
18
19 @resources.register('vm')
20 class VirtualMachine(QueryResourceManager):
21
22 class resource_type(object):
23 service = 'azure.mgmt.compute'
24 client = 'ComputeManagementClient'
25 enum_spec = ('virtual_machines', 'list_all')
26 id = 'id'
27 name = 'name'
28 default_report_fields = (
29 'name',
30 'location',
31 'resourceGroup',
32 'properties.hardwareProfile.vmSize',
33 )
34
[end of tools/c7n_azure/c7n_azure/resources/vm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py
--- a/tools/c7n_azure/c7n_azure/resources/vm.py
+++ b/tools/c7n_azure/c7n_azure/resources/vm.py
@@ -31,3 +31,15 @@
'resourceGroup',
'properties.hardwareProfile.vmSize',
)
+
[email protected]_registry.register('instance-view')
+class InstanceViewFilter(ValueFilter):
+ schema = type_schema('instance-view', rinherit=ValueFilter.schema)
+
+ def __call__(self, i):
+ if 'instanceView' not in i:
+ client = self.manager.get_client()
+ instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view
+ i['instanceView'] = instance.serialize()
+
+ return super(InstanceViewFilter, self).__call__(i['instanceView'])
| {"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py\n--- a/tools/c7n_azure/c7n_azure/resources/vm.py\n+++ b/tools/c7n_azure/c7n_azure/resources/vm.py\n@@ -31,3 +31,15 @@\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n+\[email protected]_registry.register('instance-view')\n+class InstanceViewFilter(ValueFilter):\n+ schema = type_schema('instance-view', rinherit=ValueFilter.schema)\n+\n+ def __call__(self, i):\n+ if 'instanceView' not in i:\n+ client = self.manager.get_client()\n+ instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view\n+ i['instanceView'] = instance.serialize()\n+\n+ return super(InstanceViewFilter, self).__call__(i['instanceView'])\n", "issue": "Azure VM - We are not getting power state\nThe VM data we are getting back does not tell you if the VM is running or not.\r\n\r\nI think perhaps you have to tell the `list_all` api what you want - we want `instanceview`\r\n\r\nhttps://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview\r\n\r\nNot sure how this happens via SDK.\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.query import QueryResourceManager\nfrom c7n_azure.provider import resources\nfrom c7n.filters.core import ValueFilter, type_schema\n\[email protected]('vm')\nclass VirtualMachine(QueryResourceManager):\n\n class resource_type(object):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('virtual_machines', 'list_all')\n id = 'id'\n name = 'name'\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n", "path": "tools/c7n_azure/c7n_azure/resources/vm.py"}]} | 953 | 224 |
gh_patches_debug_50933 | rasdani/github-patches | git_diff | apache__airflow-15117 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove 'user_id', 'role_id' from User and Role in OpenAPI schema
It would be good to remove the 'id' of both the User and Role schemas from what is dumped by the REST API endpoints. The IDs of the User and Role tables are sensitive data that are best hidden from the endpoints.
</issue>
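Because `UserCollectionItemSchema` is a `SQLAlchemySchema` (not an auto schema), only explicitly declared fields are serialized, so dropping the `user_id = auto_field('id', dump_only=True)` declaration is enough to keep the primary key out of responses. A hedged illustration with invented values:

```python
# `some_user` stands in for an ORM User instance; the payloads shown are illustrative only.
payload = UserCollectionItemSchema().dump(some_user)
# with the declaration:   {'user_id': 42, 'username': 'alice', ...}
# after removing it:      {'username': 'alice', ...}
```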
<code>
[start of airflow/api_connexion/schemas/user_schema.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from typing import List, NamedTuple
18
19 from flask_appbuilder.security.sqla.models import User
20 from marshmallow import Schema, fields
21 from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
22
23 from airflow.api_connexion.parameters import validate_istimezone
24 from airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema
25
26
27 class UserCollectionItemSchema(SQLAlchemySchema):
28 """user collection item schema"""
29
30 class Meta:
31 """Meta"""
32
33 model = User
34 dateformat = "iso"
35
36 user_id = auto_field('id', dump_only=True)
37 first_name = auto_field()
38 last_name = auto_field()
39 username = auto_field()
40 active = auto_field(dump_only=True)
41 email = auto_field()
42 last_login = auto_field(dump_only=True)
43 login_count = auto_field(dump_only=True)
44 fail_login_count = auto_field(dump_only=True)
45 roles = fields.List(fields.Nested(RoleSchema, only=('name',)))
46 created_on = auto_field(validate=validate_istimezone, dump_only=True)
47 changed_on = auto_field(validate=validate_istimezone, dump_only=True)
48
49
50 class UserSchema(UserCollectionItemSchema):
51 """User schema"""
52
53 password = auto_field(load_only=True)
54
55
56 class UserCollection(NamedTuple):
57 """User collection"""
58
59 users: List[User]
60 total_entries: int
61
62
63 class UserCollectionSchema(Schema):
64 """User collection schema"""
65
66 users = fields.List(fields.Nested(UserCollectionItemSchema))
67 total_entries = fields.Int()
68
69
70 user_collection_item_schema = UserCollectionItemSchema()
71 user_schema = UserSchema()
72 user_collection_schema = UserCollectionSchema()
73
[end of airflow/api_connexion/schemas/user_schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py
--- a/airflow/api_connexion/schemas/user_schema.py
+++ b/airflow/api_connexion/schemas/user_schema.py
@@ -33,7 +33,6 @@
model = User
dateformat = "iso"
- user_id = auto_field('id', dump_only=True)
first_name = auto_field()
last_name = auto_field()
username = auto_field()
| {"golden_diff": "diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py\n--- a/airflow/api_connexion/schemas/user_schema.py\n+++ b/airflow/api_connexion/schemas/user_schema.py\n@@ -33,7 +33,6 @@\n model = User\n dateformat = \"iso\"\n \n- user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n", "issue": "Remove 'user_id', 'role_id' from User and Role in OpenAPI schema \nWould be good to remove the 'id' of both User and Role schemas from what is dumped in REST API endpoints. ID of User and Role table are sensitive data that would be fine to hide from the endpoints\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, NamedTuple\n\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import Schema, fields\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema\n\n\nclass UserCollectionItemSchema(SQLAlchemySchema):\n \"\"\"user collection item schema\"\"\"\n\n class Meta:\n \"\"\"Meta\"\"\"\n\n model = User\n dateformat = \"iso\"\n\n user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n active = auto_field(dump_only=True)\n email = auto_field()\n last_login = auto_field(dump_only=True)\n login_count = auto_field(dump_only=True)\n fail_login_count = auto_field(dump_only=True)\n roles = fields.List(fields.Nested(RoleSchema, only=('name',)))\n created_on = auto_field(validate=validate_istimezone, dump_only=True)\n changed_on = auto_field(validate=validate_istimezone, dump_only=True)\n\n\nclass UserSchema(UserCollectionItemSchema):\n \"\"\"User schema\"\"\"\n\n password = auto_field(load_only=True)\n\n\nclass UserCollection(NamedTuple):\n \"\"\"User collection\"\"\"\n\n users: List[User]\n total_entries: int\n\n\nclass UserCollectionSchema(Schema):\n \"\"\"User collection schema\"\"\"\n\n users = fields.List(fields.Nested(UserCollectionItemSchema))\n total_entries = fields.Int()\n\n\nuser_collection_item_schema = UserCollectionItemSchema()\nuser_schema = UserSchema()\nuser_collection_schema = UserCollectionSchema()\n", "path": "airflow/api_connexion/schemas/user_schema.py"}]} | 1,277 | 116 |
gh_patches_debug_7405 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-823 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
urllib instrumentation fails for local file access
When reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217
</issue>
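A minimal reproduction sketch (the file path is arbitrary; any readable local file works) of why a `file://` response breaks the numeric comparison in `http_status_to_status_code`:

```python
from urllib.request import urlopen

resp = urlopen('file:///etc/hostname')  # urllib happily serves local files
print(resp.getcode())                   # None -- there is no HTTP status for file://
# Passing that None into `status < 100` then raises
#   TypeError: '<' not supported between instances of 'NoneType' and 'int'
```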
<code>
[start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict, Sequence
16
17 from wrapt import ObjectProxy
18
19 from opentelemetry import context, trace
20
21 # pylint: disable=unused-import
22 # pylint: disable=E0611
23 from opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401
24 from opentelemetry.propagate import extract
25 from opentelemetry.trace import StatusCode
26
27
28 def extract_attributes_from_object(
29 obj: any, attributes: Sequence[str], existing: Dict[str, str] = None
30 ) -> Dict[str, str]:
31 extracted = {}
32 if existing:
33 extracted.update(existing)
34 for attr in attributes:
35 value = getattr(obj, attr, None)
36 if value is not None:
37 extracted[attr] = str(value)
38 return extracted
39
40
41 def http_status_to_status_code(
42 status: int,
43 allow_redirect: bool = True,
44 server_span: bool = False,
45 ) -> StatusCode:
46 """Converts an HTTP status code to an OpenTelemetry canonical status code
47
48 Args:
49 status (int): HTTP status code
50 """
51 # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
52 if status < 100:
53 return StatusCode.ERROR
54 if status <= 299:
55 return StatusCode.UNSET
56 if status <= 399 and allow_redirect:
57 return StatusCode.UNSET
58 if status <= 499 and server_span:
59 return StatusCode.UNSET
60 return StatusCode.ERROR
61
62
63 def unwrap(obj, attr: str):
64 """Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it
65
66 Args:
67 obj: Object that holds a reference to the wrapped function
68 attr (str): Name of the wrapped function
69 """
70 func = getattr(obj, attr, None)
71 if func and isinstance(func, ObjectProxy) and hasattr(func, "__wrapped__"):
72 setattr(obj, attr, func.__wrapped__)
73
74
75 def _start_internal_or_server_span(
76 tracer, span_name, start_time, context_carrier, context_getter
77 ):
78 """Returns internal or server span along with the token which can be used by caller to reset context
79
80
81 Args:
82 tracer : tracer in use by given instrumentation library
83 name (string): name of the span
84 start_time : start time of the span
85 context_carrier : object which contains values that are
86 used to construct a Context. This object
87 must be paired with an appropriate getter
88 which understands how to extract a value from it.
89 context_getter : an object which contains a get function that can retrieve zero
90 or more values from the carrier and a keys function that can get all the keys
91 from carrier.
92 """
93
94 token = ctx = span_kind = None
95 if trace.get_current_span() is trace.INVALID_SPAN:
96 ctx = extract(context_carrier, getter=context_getter)
97 token = context.attach(ctx)
98 span_kind = trace.SpanKind.SERVER
99 else:
100 ctx = context.get_current()
101 span_kind = trace.SpanKind.INTERNAL
102 span = tracer.start_span(
103 name=span_name,
104 context=ctx,
105 kind=span_kind,
106 start_time=start_time,
107 )
108 return span, token
109
[end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
@@ -49,6 +49,9 @@
status (int): HTTP status code
"""
# See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
+ if not isinstance(status, int):
+ return StatusCode.UNSET
+
if status < 100:
return StatusCode.ERROR
if status <= 299:
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n@@ -49,6 +49,9 @@\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n+ if not isinstance(status, int):\n+ return StatusCode.UNSET\n+\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n", "issue": "urllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\nurllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence\n\nfrom wrapt import ObjectProxy\n\nfrom opentelemetry import context, trace\n\n# pylint: disable=unused-import\n# pylint: disable=E0611\nfrom opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.trace import StatusCode\n\n\ndef extract_attributes_from_object(\n obj: any, attributes: Sequence[str], existing: Dict[str, str] = None\n) -> Dict[str, str]:\n extracted = {}\n if existing:\n extracted.update(existing)\n for attr in attributes:\n value = getattr(obj, attr, None)\n if value is not None:\n extracted[attr] = str(value)\n return extracted\n\n\ndef http_status_to_status_code(\n status: int,\n allow_redirect: bool = True,\n server_span: bool = False,\n) -> StatusCode:\n \"\"\"Converts an HTTP status code to an OpenTelemetry canonical status code\n\n Args:\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n return StatusCode.UNSET\n if status <= 399 and allow_redirect:\n return StatusCode.UNSET\n if status <= 499 and server_span:\n return StatusCode.UNSET\n return StatusCode.ERROR\n\n\ndef unwrap(obj, attr: str):\n \"\"\"Given a function that was wrapped by 
wrapt.wrap_function_wrapper, unwrap it\n\n Args:\n obj: Object that holds a reference to the wrapped function\n attr (str): Name of the wrapped function\n \"\"\"\n func = getattr(obj, attr, None)\n if func and isinstance(func, ObjectProxy) and hasattr(func, \"__wrapped__\"):\n setattr(obj, attr, func.__wrapped__)\n\n\ndef _start_internal_or_server_span(\n tracer, span_name, start_time, context_carrier, context_getter\n):\n \"\"\"Returns internal or server span along with the token which can be used by caller to reset context\n\n\n Args:\n tracer : tracer in use by given instrumentation library\n name (string): name of the span\n start_time : start time of the span\n context_carrier : object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context_getter : an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n \"\"\"\n\n token = ctx = span_kind = None\n if trace.get_current_span() is trace.INVALID_SPAN:\n ctx = extract(context_carrier, getter=context_getter)\n token = context.attach(ctx)\n span_kind = trace.SpanKind.SERVER\n else:\n ctx = context.get_current()\n span_kind = trace.SpanKind.INTERNAL\n span = tracer.start_span(\n name=span_name,\n context=ctx,\n kind=span_kind,\n start_time=start_time,\n )\n return span, token\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py"}]} | 1,854 | 167 |
gh_patches_debug_19589 | rasdani/github-patches | git_diff | cloudtools__troposphere-839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
</issue>
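Assuming the `PlatformArn` entry is added to the props as requested, a custom platform could then be referenced like any other string property; the ARN and application name below are placeholders, not real resources:

```python
from troposphere import Template
from troposphere.elasticbeanstalk import Environment

t = Template()
t.add_resource(Environment(
    'MyEnv',
    ApplicationName='my-app',  # placeholder application name
    PlatformArn='arn:aws:elasticbeanstalk:us-east-1::platform/MyCustomPlatform/1.0.0',
))
print(t.to_json())  # only validates once 'PlatformArn' exists in the props dict
```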
<code>
[start of troposphere/elasticbeanstalk.py]
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7
8
9 WebServer = "WebServer"
10 Worker = "Worker"
11 WebServerType = "Standard"
12 WorkerType = "SQS/HTTP"
13
14
15 class SourceBundle(AWSProperty):
16 props = {
17 'S3Bucket': (basestring, True),
18 'S3Key': (basestring, True),
19 }
20
21
22 class SourceConfiguration(AWSProperty):
23 props = {
24 'ApplicationName': (basestring, True),
25 'TemplateName': (basestring, True),
26 }
27
28
29 class OptionSettings(AWSProperty):
30 props = {
31 'Namespace': (basestring, True),
32 'OptionName': (basestring, True),
33 'Value': (basestring, True),
34 }
35
36
37 class Application(AWSObject):
38 resource_type = "AWS::ElasticBeanstalk::Application"
39
40 props = {
41 'ApplicationName': (basestring, False),
42 'Description': (basestring, False),
43 }
44
45
46 class ApplicationVersion(AWSObject):
47 resource_type = "AWS::ElasticBeanstalk::ApplicationVersion"
48
49 props = {
50 'ApplicationName': (basestring, True),
51 'Description': (basestring, False),
52 'SourceBundle': (SourceBundle, False),
53 }
54
55
56 class ConfigurationTemplate(AWSObject):
57 resource_type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
58
59 props = {
60 'ApplicationName': (basestring, True),
61 'Description': (basestring, False),
62 'EnvironmentId': (basestring, False),
63 'OptionSettings': ([OptionSettings], False),
64 'SolutionStackName': (basestring, False),
65 'SourceConfiguration': (SourceConfiguration, False),
66 }
67
68
69 def validate_tier_name(name):
70 valid_names = [WebServer, Worker]
71 if name not in valid_names:
72 raise ValueError('Tier name needs to be one of %r' % valid_names)
73 return name
74
75
76 def validate_tier_type(tier_type):
77 valid_types = [WebServerType, WorkerType]
78 if tier_type not in valid_types:
79 raise ValueError('Tier type needs to be one of %r' % valid_types)
80 return tier_type
81
82
83 class Tier(AWSProperty):
84 props = {
85 'Name': (validate_tier_name, False),
86 'Type': (validate_tier_type, False),
87 'Version': (basestring, False),
88 }
89
90
91 class Environment(AWSObject):
92 resource_type = "AWS::ElasticBeanstalk::Environment"
93
94 props = {
95 'ApplicationName': (basestring, True),
96 'CNAMEPrefix': (basestring, False),
97 'Description': (basestring, False),
98 'EnvironmentName': (basestring, False),
99 'OptionSettings': ([OptionSettings], False),
100 'SolutionStackName': (basestring, False),
101 'Tags': (Tags, False),
102 'TemplateName': (basestring, False),
103 'Tier': (Tier, False),
104 'VersionLabel': (basestring, False),
105 }
106
[end of troposphere/elasticbeanstalk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py
--- a/troposphere/elasticbeanstalk.py
+++ b/troposphere/elasticbeanstalk.py
@@ -61,6 +61,7 @@
'Description': (basestring, False),
'EnvironmentId': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'SourceConfiguration': (SourceConfiguration, False),
}
@@ -97,6 +98,7 @@
'Description': (basestring, False),
'EnvironmentName': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'Tags': (Tags, False),
'TemplateName': (basestring, False),
| {"golden_diff": "diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py\n--- a/troposphere/elasticbeanstalk.py\n+++ b/troposphere/elasticbeanstalk.py\n@@ -61,6 +61,7 @@\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n@@ -97,6 +98,7 @@\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n", "issue": "Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)\r\nUse the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, 
False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}]} | 1,540 | 213 |
gh_patches_debug_8055 | rasdani/github-patches | git_diff | urllib3__urllib3-1497 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py lacks appropriate metadata for differing python versions
Please see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318
Basically, it appears the METADATA in the resulting installation differs depending on the installation method.
I've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.
However, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.
Can anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.
</issue>
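The usual way to express such per-version constraints directly in `setup.py` is a PEP 508 environment marker inside `extras_require`, which recent setuptools propagates into the wheel/sdist METADATA; the package list here is illustrative:

```python
extras_require = {
    'secure': [
        'pyOpenSSL>=0.14',
        'cryptography>=1.3.4',
        "ipaddress; python_version=='2.7'",  # marker: only installed on Python 2.7
    ],
    'socks': ['PySocks>=1.5.6,<2.0,!=1.5.7'],
}
```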
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 import os
6 import re
7 import codecs
8
9 base_path = os.path.dirname(__file__)
10
11 # Get the version (borrowed from SQLAlchemy)
12 with open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:
13 VERSION = re.compile(r".*__version__ = '(.*?)'",
14 re.S).match(fp.read()).group(1)
15
16 with codecs.open('README.rst', encoding='utf-8') as fp:
17 readme = fp.read()
18 with codecs.open('CHANGES.rst', encoding='utf-8') as fp:
19 changes = fp.read()
20 version = VERSION
21
22 setup(name='urllib3',
23 version=version,
24 description="HTTP library with thread-safe connection pooling, file post, and more.",
25 long_description=u'\n\n'.join([readme, changes]),
26 classifiers=[
27 'Environment :: Web Environment',
28 'Intended Audience :: Developers',
29 'License :: OSI Approved :: MIT License',
30 'Operating System :: OS Independent',
31 'Programming Language :: Python',
32 'Programming Language :: Python :: 2',
33 'Programming Language :: Python :: 2.7',
34 'Programming Language :: Python :: 3',
35 'Programming Language :: Python :: 3.4',
36 'Programming Language :: Python :: 3.5',
37 'Programming Language :: Python :: 3.6',
38 'Programming Language :: Python :: 3.7',
39 'Programming Language :: Python :: 3.8',
40 'Programming Language :: Python :: Implementation :: CPython',
41 'Programming Language :: Python :: Implementation :: PyPy',
42 'Topic :: Internet :: WWW/HTTP',
43 'Topic :: Software Development :: Libraries',
44 ],
45 keywords='urllib httplib threadsafe filepost http https ssl pooling',
46 author='Andrey Petrov',
47 author_email='[email protected]',
48 url='https://urllib3.readthedocs.io/',
49 license='MIT',
50 packages=['urllib3',
51 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
52 'urllib3.packages.backports', 'urllib3.packages.rfc3986',
53 'urllib3.contrib', 'urllib3.contrib._securetransport',
54 'urllib3.util'],
55 package_dir={'': 'src'},
56 requires=[],
57 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
58 tests_require=[
59 # These are a less-specific subset of dev-requirements.txt, for the
60 # convenience of distro package maintainers.
61 'pytest',
62 'mock',
63 'tornado',
64 ],
65 test_suite='test',
66 extras_require={
67 'secure': [
68 'pyOpenSSL >= 0.14',
69 'cryptography>=1.3.4',
70 'idna>=2.0.0',
71 'certifi',
72 "ipaddress",
73 ],
74 'socks': [
75 'PySocks>=1.5.6,<2.0,!=1.5.7',
76 ]
77 },
78 )
79
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,11 +65,11 @@
test_suite='test',
extras_require={
'secure': [
- 'pyOpenSSL >= 0.14',
+ 'pyOpenSSL>=0.14',
'cryptography>=1.3.4',
'idna>=2.0.0',
'certifi',
- "ipaddress",
+ "ipaddress; python_version=='2.7'",
],
'socks': [
'PySocks>=1.5.6,<2.0,!=1.5.7',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,11 +65,11 @@\n test_suite='test',\n extras_require={\n 'secure': [\n- 'pyOpenSSL >= 0.14',\n+ 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n- \"ipaddress\",\n+ \"ipaddress; python_version=='2.7'\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n", "issue": "setup.py lacks appropriate metadata for differing python versions\nPlease see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318\r\n\r\nBasically, it appears the METADATA in the resulting installation differs depending on the installation method.\r\n\r\nI've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.\r\n\r\nHowever, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.\r\n\r\nCan anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.packages.rfc3986',\n 'urllib3.contrib', 'urllib3.contrib._securetransport',\n 'urllib3.util'],\n package_dir={'': 'src'},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'pytest',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL >= 0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress\",\n ],\n 'socks': [\n 
'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}]} | 1,523 | 152 |
gh_patches_debug_18891 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3198 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix `safely_reserve_a_username`
This function keeps using a cursor after an `IntegrityError` exception is raised; that is invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.
</issue>
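A hedged sketch (table and names simplified) of the PostgreSQL savepoint pattern that lets a transaction survive an `IntegrityError`, so the same cursor remains usable for the next attempt:

```python
from psycopg2 import IntegrityError

def try_reserve(cursor, candidate):
    """Attempt one INSERT; roll back only this attempt on a collision."""
    cursor.execute("SAVEPOINT reserve_attempt")
    try:
        cursor.execute("INSERT INTO participants (username) VALUES (%s)", (candidate,))
    except IntegrityError:
        cursor.execute("ROLLBACK TO SAVEPOINT reserve_attempt")  # cursor stays usable
        return False
    cursor.execute("RELEASE SAVEPOINT reserve_attempt")
    return True
```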
<code>
[start of gratipay/utils/username.py]
1 from psycopg2 import IntegrityError
2 import random
3
4
5 class FailedToReserveUsername(Exception): pass
6 class RanOutOfUsernameAttempts(Exception): pass
7
8
9 def gen_random_usernames():
10 """Yield random 12-hex-digit unicodes.
11 """
12 while 1:
13 yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')
14
15
16 def insert_into_participants(cursor, username):
17 return cursor.one( "INSERT INTO participants (username, username_lower) "
18 "VALUES (%s, %s) RETURNING username"
19 , (username, username.lower())
20 )
21
22
23 def safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,
24 reserve=insert_into_participants):
25 """Safely reserve a username.
26
27 :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`
28 transaction
29 :param gen_usernames: a generator of usernames to try
30 :param reserve: a function that takes the cursor and does the SQL
31 stuff
32 :database: one ``INSERT`` on average
33 :returns: a 12-hex-digit unicode
34 :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found
35 within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username
36 generator runs out first
37
38 The returned value is guaranteed to have been reserved in the database.
39
40 """
41 seatbelt = 0
42 for username in gen_usernames():
43 seatbelt += 1
44 if seatbelt > 100:
45 raise FailedToReserveUsername
46
47 try:
48 check = reserve(cursor, username)
49 except IntegrityError: # Collision, try again with another value.
50 continue
51 else:
52 assert check == username
53 break
54 else:
55 raise RanOutOfUsernameAttempts
56 return username
57
[end of gratipay/utils/username.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py
--- a/gratipay/utils/username.py
+++ b/gratipay/utils/username.py
@@ -38,6 +38,8 @@
The returned value is guaranteed to have been reserved in the database.
"""
+ cursor.execute("SAVEPOINT safely_reserve_a_username")
+
seatbelt = 0
for username in gen_usernames():
seatbelt += 1
@@ -47,10 +49,13 @@
try:
check = reserve(cursor, username)
except IntegrityError: # Collision, try again with another value.
+ cursor.execute("ROLLBACK TO safely_reserve_a_username")
continue
else:
assert check == username
break
else:
raise RanOutOfUsernameAttempts
+
+ cursor.execute("RELEASE safely_reserve_a_username")
return username
| {"golden_diff": "diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py\n--- a/gratipay/utils/username.py\n+++ b/gratipay/utils/username.py\n@@ -38,6 +38,8 @@\n The returned value is guaranteed to have been reserved in the database.\n \n \"\"\"\n+ cursor.execute(\"SAVEPOINT safely_reserve_a_username\")\n+\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n@@ -47,10 +49,13 @@\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n+ cursor.execute(\"ROLLBACK TO safely_reserve_a_username\")\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n+\n+ cursor.execute(\"RELEASE safely_reserve_a_username\")\n return username\n", "issue": "Fix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\nFix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\n", "before_files": [{"content": "from psycopg2 import IntegrityError\nimport random\n\n\nclass FailedToReserveUsername(Exception): pass\nclass RanOutOfUsernameAttempts(Exception): pass\n\n\ndef gen_random_usernames():\n \"\"\"Yield random 12-hex-digit unicodes.\n \"\"\"\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n\n\ndef insert_into_participants(cursor, username):\n return cursor.one( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s) RETURNING username\"\n , (username, username.lower())\n )\n\n\ndef safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,\n reserve=insert_into_participants):\n \"\"\"Safely reserve a username.\n\n :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`\n transaction\n :param gen_usernames: a generator of usernames to try\n :param reserve: a function that takes the cursor and does the SQL\n stuff\n :database: one ``INSERT`` on average\n :returns: a 12-hex-digit unicode\n :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found\n within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username\n generator runs out first\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n if seatbelt > 100:\n raise FailedToReserveUsername\n\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n return username\n", "path": "gratipay/utils/username.py"}]} | 1,197 | 199 |
gh_patches_debug_5570 | rasdani/github-patches | git_diff | mindsdb__lightwood-40 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ModuleNotFoundError: No module named '_lzma'
I've tried to test lightwood with [home rentals
example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.
Screenshot:

It looks like _lzma is a dependency of pandas, but it should already be included with Python 3.x.
</issue>
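`_lzma` is a C extension of the CPython standard library, not a PyPI package: it goes missing when the interpreter itself was compiled without the xz/liblzma development headers (common with pyenv or from-source builds), and pandas merely imports it. The usual fix is to install the distro's xz development package and rebuild the interpreter. A quick check, independent of lightwood:

```python
# If this import fails, the running interpreter was built without liblzma support.
import lzma
print(lzma.decompress(lzma.compress(b'ok')))  # b'ok' when the module is present
```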
<code>
[start of lightwood/__about__.py]
1 __title__ = 'lightwood'
2 __package_name__ = 'mindsdb'
3 __version__ = '0.9.0'
4 __description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
5 __email__ = "[email protected]"
6 __author__ = 'MindsDB Inc'
7 __github__ = 'https://github.com/mindsdb/lightwood'
8 __pypi__ = 'https://pypi.org/project/lightwood'
9 __license__ = 'MIT'
10 __copyright__ = 'Copyright 2019- mindsdb'
11
[end of lightwood/__about__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/__about__.py b/lightwood/__about__.py
--- a/lightwood/__about__.py
+++ b/lightwood/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'lightwood'
__package_name__ = 'mindsdb'
-__version__ = '0.9.0'
+__version__ = '0.9.1'
__description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
| {"golden_diff": "diff --git a/lightwood/__about__.py b/lightwood/__about__.py\n--- a/lightwood/__about__.py\n+++ b/lightwood/__about__.py\n@@ -1,6 +1,6 @@\n __title__ = 'lightwood'\n __package_name__ = 'mindsdb'\n-__version__ = '0.9.0'\n+__version__ = '0.9.1'\n __description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n __email__ = \"[email protected]\"\n __author__ = 'MindsDB Inc'\n", "issue": "ModuleNotFoundError: No module named '_lzma'\nI've tried to test lightwood with [home rentals\r\n example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.\r\n\r\nScreenshot:\r\n\r\n\r\nIt looks like _lzma is a dependency to pandas, but it should be included with Python 3.x version.\r\n\r\n\n", "before_files": [{"content": "__title__ = 'lightwood'\n__package_name__ = 'mindsdb'\n__version__ = '0.9.0'\n__description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/lightwood'\n__pypi__ = 'https://pypi.org/project/lightwood'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019- mindsdb'\n", "path": "lightwood/__about__.py"}]} | 849 | 140 |
gh_patches_debug_713 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1826 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Agate type inference is too clever
### Describe the bug
We’re trying to set a value from a {% call statement %} and within the call, one line is SELECT 0 AS my_value...and it then treats it as a boolean (false) in the returned values.
The same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value it treats it like a number (as it should).
### Steps To Reproduce
Create a call statement that selects 0, or 1. false, and true respectively will be returned.
### Expected behavior
0, or 1 to be returned, as integers.
### Screenshots and log output
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [x] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
installed version: 0.15.0-a1
latest version: 0.14.2
Your version of dbt is ahead of the latest release!
```
FYI, we run a fork, but that shouldn't have affected anything here.
**The operating system you're using:**
Mojave
**The output of `python --version`:**
Python 3.7.1
### Additional context
We'd love a quick fix for this, even if it's ugly!
</issue>
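A hedged reproduction sketch of agate's default inference (exact behaviour depends on the agate version): a column holding only 0/1 is matched by the Boolean tester before the Number tester, while an explicit column type sidesteps the guessing:

```python
import agate

rows = [(0,), (1,)]
table = agate.Table(rows, column_names=['my_value'])           # default TypeTester
print([type(v).__name__ for v in table.columns['my_value']])   # likely ['bool', 'bool']

forced = agate.Table(rows, column_names=['my_value'],
                     column_types=[agate.Number()])            # explicit override
print(list(forced.columns['my_value']))                        # [Decimal('0'), Decimal('1')]
```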
<code>
[start of core/dbt/clients/agate_helper.py]
1 from codecs import BOM_UTF8
2
3 import agate
4 import json
5
6
7 BOM = BOM_UTF8.decode('utf-8') # '\ufeff'
8
9 DEFAULT_TYPE_TESTER = agate.TypeTester(types=[
10 agate.data_types.Number(null_values=('null', '')),
11 agate.data_types.TimeDelta(null_values=('null', '')),
12 agate.data_types.Date(null_values=('null', '')),
13 agate.data_types.DateTime(null_values=('null', '')),
14 agate.data_types.Boolean(true_values=('true',),
15 false_values=('false',),
16 null_values=('null', '')),
17 agate.data_types.Text(null_values=('null', ''))
18 ])
19
20
21 def table_from_data(data, column_names):
22 "Convert list of dictionaries into an Agate table"
23
24 # The agate table is generated from a list of dicts, so the column order
25 # from `data` is not preserved. We can use `select` to reorder the columns
26 #
27 # If there is no data, create an empty table with the specified columns
28
29 if len(data) == 0:
30 return agate.Table([], column_names=column_names)
31 else:
32 table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)
33 return table.select(column_names)
34
35
36 def table_from_data_flat(data, column_names):
37 "Convert list of dictionaries into an Agate table"
38
39 rows = []
40 for _row in data:
41 row = []
42 for value in list(_row.values()):
43 if isinstance(value, (dict, list, tuple)):
44 row.append(json.dumps(value))
45 else:
46 row.append(value)
47 rows.append(row)
48
49 return agate.Table(rows, column_names)
50
51
52 def empty_table():
53 "Returns an empty Agate table. To be used in place of None"
54
55 return agate.Table(rows=[])
56
57
58 def as_matrix(table):
59 "Return an agate table as a matrix of data sans columns"
60
61 return [r.values() for r in table.rows.values()]
62
63
64 def from_csv(abspath):
65 with open(abspath, encoding='utf-8') as fp:
66 if fp.read(1) != BOM:
67 fp.seek(0)
68 return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)
69
[end of core/dbt/clients/agate_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py
--- a/core/dbt/clients/agate_helper.py
+++ b/core/dbt/clients/agate_helper.py
@@ -46,7 +46,7 @@
row.append(value)
rows.append(row)
- return agate.Table(rows, column_names)
+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)
def empty_table():
| {"golden_diff": "diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py\n--- a/core/dbt/clients/agate_helper.py\n+++ b/core/dbt/clients/agate_helper.py\n@@ -46,7 +46,7 @@\n row.append(value)\n rows.append(row)\n \n- return agate.Table(rows, column_names)\n+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)\n \n \n def empty_table():\n", "issue": "Agate type inference is too clever\n### Describe the bug\r\nWe\u2019re trying to set a value from a {% call statement %} and within the call, one line is SELECT 0 AS my_value...and it then treats it as a boolean (false) in the returned values. \r\n\r\nThe same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value it treats it like a number (as it should).\r\n\r\n### Steps To Reproduce\r\nCreate a call statement that selects 0, or 1. false, and true respectively will be returned.\r\n\r\n### Expected behavior\r\n0, or 1 to be returned, as integers.\r\n\r\n### Screenshots and log output\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [x] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ninstalled version: 0.15.0-a1\r\n latest version: 0.14.2\r\nYour version of dbt is ahead of the latest release!\r\n```\r\n\r\nFYI, we run a fork, but that shouldn't have affected anything here.\r\n\r\n**The operating system you're using:**\r\n\r\nMojave\r\n\r\n**The output of `python --version`:**\r\n\r\nPython 3.7.1\r\n\r\n### Additional context\r\nWe'd love a quick fix for this, even if it's ugly!\r\n\n", "before_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport json\n\n\nBOM = BOM_UTF8.decode('utf-8') # '\\ufeff'\n\nDEFAULT_TYPE_TESTER = agate.TypeTester(types=[\n agate.data_types.Number(null_values=('null', '')),\n agate.data_types.TimeDelta(null_values=('null', '')),\n agate.data_types.Date(null_values=('null', '')),\n agate.data_types.DateTime(null_values=('null', '')),\n agate.data_types.Boolean(true_values=('true',),\n false_values=('false',),\n null_values=('null', '')),\n agate.data_types.Text(null_values=('null', ''))\n])\n\n\ndef table_from_data(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n rows = []\n for _row in data:\n row = []\n for value in list(_row.values()):\n if isinstance(value, (dict, list, tuple)):\n row.append(json.dumps(value))\n else:\n row.append(value)\n rows.append(row)\n\n return agate.Table(rows, column_names)\n\n\ndef empty_table():\n \"Returns an empty Agate table. 
To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath):\n with open(abspath, encoding='utf-8') as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)\n", "path": "core/dbt/clients/agate_helper.py"}]} | 1,486 | 107 |
gh_patches_debug_1031 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-3857 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[beta][v20] Signing up, logging in, or searching with an emoji causes a 500 error
Server: Beta
Version: v20-RC3/d3fd8af
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Go to the registration page and enter a username such as: 👚 test
2. Fill in the other fields.
3. Submit the form.
4. Observe a 500 error.
Note: You can reproduce the same error by trying to log in with the same username, or by searching for a member's username.
</issue>
<code>
[start of zds/utils/misc.py]
1 # coding: utf-8
2 import hashlib
3 import re
4
5 THUMB_MAX_WIDTH = 80
6 THUMB_MAX_HEIGHT = 80
7
8 MEDIUM_MAX_WIDTH = 200
9 MEDIUM_MAX_HEIGHT = 200
10
11
12 def compute_hash(filenames):
13 """returns a md5 hexdigest of group of files to check if they have change"""
14 md5_hash = hashlib.md5()
15 for filename in filenames:
16 if filename:
17 file_handle = open(filename, 'rb')
18 must_continue = True
19 while must_continue:
20 read_bytes = file_handle.read(8096)
21 if not read_bytes:
22 must_continue = False
23 else:
24 md5_hash.update(read_bytes)
25 return md5_hash.hexdigest()
26
27
28 def content_has_changed(filenames, md5):
29 return md5 != compute_hash(filenames)
30
31
32 def has_changed(instance, field, manager='objects'):
33 """Returns true if a field has changed in a model May be used in a
34 model.save() method."""
35 if not instance.pk:
36 return True
37 manager = getattr(instance.__class__, manager)
38 old = getattr(manager.get(pk=instance.pk), field)
39 return not getattr(instance, field) == old
40
41
42 def convert_camel_to_underscore(camel_case):
43 """
44 Converts a name in camel case to underscore.
45 """
46 s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
47 return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
48
49
50 def contains_utf8mb4(s):
51 """
52 This string contains at least one character of more than 3 bytes
53 """
54 if not isinstance(s, unicode):
55 s = unicode(s, 'utf-8')
56 return not all(len(c.encode('utf-8')) <= 3 for c in s)
57
[end of zds/utils/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/misc.py b/zds/utils/misc.py
--- a/zds/utils/misc.py
+++ b/zds/utils/misc.py
@@ -53,4 +53,5 @@
"""
if not isinstance(s, unicode):
s = unicode(s, 'utf-8')
- return not all(len(c.encode('utf-8')) <= 3 for c in s)
+ re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
+ return s != re_pattern.sub(u'\uFFFD', s)
| {"golden_diff": "diff --git a/zds/utils/misc.py b/zds/utils/misc.py\n--- a/zds/utils/misc.py\n+++ b/zds/utils/misc.py\n@@ -53,4 +53,5 @@\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n- return not all(len(c.encode('utf-8')) <= 3 for c in s)\n+ re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n+ return s != re_pattern.sub(u'\\uFFFD', s)\n", "issue": "[beta][v20] S'inscrire/se connecter/chercher avec un emoji provoque une 500\nServeur : Beta\nVersion : v20-RC3/d3fd8af\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. Rendez-vous \u00e0 la page d'inscription et renseigner un pseudo du type : \ud83d\udc5a test\n2. Remplissez les autres champs.\n3. Soumettez le formulaire.\n4. Constatez une erreur 500.\n\nNote : Vous pouvez reproduire la m\u00eame erreur en tentant de vous connecter avec le m\u00eame pseudo ou en faisant une recherche sur le pseudo d'un membre.\n\n", "before_files": [{"content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n return not all(len(c.encode('utf-8')) <= 3 for c in s)\n", "path": "zds/utils/misc.py"}]} | 1,224 | 135 |
gh_patches_debug_39012 | rasdani/github-patches | git_diff | chainer__chainer-6031 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Orthogonal initializer dimensions error
Orthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.
e.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`

e.g. 2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`

System Info:
Chainer: 5.0.0rc1
NumPy: 1.14.2
CuPy:
CuPy Version : 5.0.0rc1
CUDA Root : /usr/local/cuda
CUDA Build Version : 8000
CUDA Driver Version : 9020
CUDA Runtime Version : 8000
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2213
</issue>
<code>
[start of chainer/initializers/orthogonal.py]
1 import numpy
2
3 from chainer import backend
4 from chainer import initializer
5 from chainer import utils
6
7
8 # Original code forked from MIT licensed keras project
9 # https://github.com/fchollet/keras/blob/master/keras/initializations.py
10
11 class Orthogonal(initializer.Initializer):
12 """Initializes array with an orthogonal system.
13
14 This initializer first makes a matrix of the same shape as the
15 array to be initialized whose elements are drawn independently from
16 standard Gaussian distribution.
17 Next, it applies QR decomposition to (the transpose of) the matrix.
18 To make the decomposition (almost surely) unique, we require the diagonal
19 of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,
20 https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).
21 Then, it initializes the array with the (semi-)orthogonal matrix Q.
22 Finally, the array is multiplied by the constant ``scale``.
23
24 If the ``ndim`` of the input array is more than 2, we consider the array
25 to be a matrix by concatenating all axes except the first one.
26
27 The number of vectors consisting of the orthogonal system
28 (i.e. first element of the shape of the array) must be equal to or smaller
29 than the dimension of each vector (i.e. second element of the shape of
30 the array).
31
32 Attributes:
33 scale (float): A constant to be multiplied by.
34 dtype: Data type specifier.
35
36 Reference: Saxe et al., https://arxiv.org/abs/1312.6120
37
38 """
39
40 def __init__(self, scale=1.1, dtype=None):
41 self.scale = scale
42 super(Orthogonal, self).__init__(dtype)
43
44 # TODO(Kenta Oono)
45 # How do we treat overcomplete base-system case?
46 def __call__(self, array):
47 if self.dtype is not None:
48 assert array.dtype == self.dtype
49 xp = backend.get_array_module(array)
50 if not array.shape: # 0-dim case
51 array[...] = self.scale * (2 * numpy.random.randint(2) - 1)
52 elif not array.size:
53 raise ValueError('Array to be initialized must be non-empty.')
54 else:
55 # numpy.prod returns float value when the argument is empty.
56 flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
57 if flat_shape[0] > flat_shape[1]:
58 raise ValueError('Cannot make orthogonal system because'
59 ' # of vectors ({}) is larger than'
60 ' that of dimensions ({})'.format(
61 flat_shape[0], flat_shape[1]))
62 a = numpy.random.normal(size=flat_shape)
63 # cupy.linalg.qr requires cusolver in CUDA 8+
64 q, r = numpy.linalg.qr(a.T)
65 q *= numpy.copysign(self.scale, numpy.diag(r))
66 array[...] = xp.asarray(q.T.reshape(array.shape))
67
[end of chainer/initializers/orthogonal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py
--- a/chainer/initializers/orthogonal.py
+++ b/chainer/initializers/orthogonal.py
@@ -5,6 +5,14 @@
from chainer import utils
+_orthogonal_constraints = { # (assert emb., assert proj.)
+ 'auto': (False, False),
+ 'projection': (False, True),
+ 'embedding': (True, False),
+ 'basis': (True, True),
+}
+
+
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
@@ -32,13 +40,24 @@
Attributes:
scale (float): A constant to be multiplied by.
dtype: Data type specifier.
+ mode (str): Assertion on the initialized shape.
+ ``'auto'`` (default), ``'projection'`` (before v7),
+ ``'embedding'``, or ``'basis'``.
Reference: Saxe et al., https://arxiv.org/abs/1312.6120
"""
- def __init__(self, scale=1.1, dtype=None):
+ def __init__(self, scale=1.1, dtype=None, mode='auto'):
self.scale = scale
+ self.mode = mode
+ try:
+ self._checks = _orthogonal_constraints[mode]
+ except KeyError:
+ raise ValueError(
+ 'Invalid mode: {}. Choose from {}.'.format(
+ repr(mode),
+ ', '.join(repr(m) for m in _orthogonal_constraints)))
super(Orthogonal, self).__init__(dtype)
# TODO(Kenta Oono)
@@ -53,14 +72,22 @@
raise ValueError('Array to be initialized must be non-empty.')
else:
# numpy.prod returns float value when the argument is empty.
- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
- if flat_shape[0] > flat_shape[1]:
- raise ValueError('Cannot make orthogonal system because'
- ' # of vectors ({}) is larger than'
- ' that of dimensions ({})'.format(
- flat_shape[0], flat_shape[1]))
- a = numpy.random.normal(size=flat_shape)
+ out_dim = len(array)
+ in_dim = utils.size_of_shape(array.shape[1:])
+ if (in_dim > out_dim and self._checks[0]) or (
+ in_dim < out_dim and self._checks[1]):
+ raise ValueError(
+ 'Cannot make orthogonal {}.'
+ 'shape = {}, interpreted as '
+ '{}-dim input and {}-dim output.'.format(
+ self.mode, array.shape, in_dim, out_dim))
+ transpose = in_dim > out_dim
+ a = numpy.random.normal(size=(out_dim, in_dim))
+ if transpose:
+ a = a.T
# cupy.linalg.qr requires cusolver in CUDA 8+
- q, r = numpy.linalg.qr(a.T)
+ q, r = numpy.linalg.qr(a)
q *= numpy.copysign(self.scale, numpy.diag(r))
- array[...] = xp.asarray(q.T.reshape(array.shape))
+ if transpose:
+ q = q.T
+ array[...] = xp.asarray(q.reshape(array.shape))
| {"golden_diff": "diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py\n--- a/chainer/initializers/orthogonal.py\n+++ b/chainer/initializers/orthogonal.py\n@@ -5,6 +5,14 @@\n from chainer import utils\n \n \n+_orthogonal_constraints = { # (assert emb., assert proj.)\n+ 'auto': (False, False),\n+ 'projection': (False, True),\n+ 'embedding': (True, False),\n+ 'basis': (True, True),\n+}\n+\n+\n # Original code forked from MIT licensed keras project\n # https://github.com/fchollet/keras/blob/master/keras/initializations.py\n \n@@ -32,13 +40,24 @@\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n+ mode (str): Assertion on the initialized shape.\n+ ``'auto'`` (default), ``'projection'`` (before v7),\n+ ``'embedding'``, or ``'basis'``.\n \n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n \n \"\"\"\n \n- def __init__(self, scale=1.1, dtype=None):\n+ def __init__(self, scale=1.1, dtype=None, mode='auto'):\n self.scale = scale\n+ self.mode = mode\n+ try:\n+ self._checks = _orthogonal_constraints[mode]\n+ except KeyError:\n+ raise ValueError(\n+ 'Invalid mode: {}. Choose from {}.'.format(\n+ repr(mode),\n+ ', '.join(repr(m) for m in _orthogonal_constraints)))\n super(Orthogonal, self).__init__(dtype)\n \n # TODO(Kenta Oono)\n@@ -53,14 +72,22 @@\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n- if flat_shape[0] > flat_shape[1]:\n- raise ValueError('Cannot make orthogonal system because'\n- ' # of vectors ({}) is larger than'\n- ' that of dimensions ({})'.format(\n- flat_shape[0], flat_shape[1]))\n- a = numpy.random.normal(size=flat_shape)\n+ out_dim = len(array)\n+ in_dim = utils.size_of_shape(array.shape[1:])\n+ if (in_dim > out_dim and self._checks[0]) or (\n+ in_dim < out_dim and self._checks[1]):\n+ raise ValueError(\n+ 'Cannot make orthogonal {}.'\n+ 'shape = {}, interpreted as '\n+ '{}-dim input and {}-dim output.'.format(\n+ self.mode, array.shape, in_dim, out_dim))\n+ transpose = in_dim > out_dim\n+ a = numpy.random.normal(size=(out_dim, in_dim))\n+ if transpose:\n+ a = a.T\n # cupy.linalg.qr requires cusolver in CUDA 8+\n- q, r = numpy.linalg.qr(a.T)\n+ q, r = numpy.linalg.qr(a)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n- array[...] = xp.asarray(q.T.reshape(array.shape))\n+ if transpose:\n+ q = q.T\n+ array[...] = xp.asarray(q.reshape(array.shape))\n", "issue": "Orthogonal initializer dimensions error\nOrthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.\r\n\r\n\r\ne.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`\r\n\r\n\r\n\r\n\r\ne.g. 
2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`\r\n\r\n\r\n\r\n\r\nSystem Info:\r\nChainer: 5.0.0rc1\r\nNumPy: 1.14.2\r\nCuPy:\r\n CuPy Version : 5.0.0rc1\r\n CUDA Root : /usr/local/cuda\r\n CUDA Build Version : 8000\r\n CUDA Driver Version : 9020\r\n CUDA Runtime Version : 8000\r\n cuDNN Build Version : 7102\r\n cuDNN Version : 7102\r\n NCCL Build Version : 2213\r\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import backend\nfrom chainer import initializer\nfrom chainer import utils\n\n\n# Original code forked from MIT licensed keras project\n# https://github.com/fchollet/keras/blob/master/keras/initializations.py\n\nclass Orthogonal(initializer.Initializer):\n \"\"\"Initializes array with an orthogonal system.\n\n This initializer first makes a matrix of the same shape as the\n array to be initialized whose elements are drawn independently from\n standard Gaussian distribution.\n Next, it applies QR decomposition to (the transpose of) the matrix.\n To make the decomposition (almost surely) unique, we require the diagonal\n of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,\n https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).\n Then, it initializes the array with the (semi-)orthogonal matrix Q.\n Finally, the array is multiplied by the constant ``scale``.\n\n If the ``ndim`` of the input array is more than 2, we consider the array\n to be a matrix by concatenating all axes except the first one.\n\n The number of vectors consisting of the orthogonal system\n (i.e. first element of the shape of the array) must be equal to or smaller\n than the dimension of each vector (i.e. second element of the shape of\n the array).\n\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n\n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n\n \"\"\"\n\n def __init__(self, scale=1.1, dtype=None):\n self.scale = scale\n super(Orthogonal, self).__init__(dtype)\n\n # TODO(Kenta Oono)\n # How do we treat overcomplete base-system case?\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n xp = backend.get_array_module(array)\n if not array.shape: # 0-dim case\n array[...] = self.scale * (2 * numpy.random.randint(2) - 1)\n elif not array.size:\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n if flat_shape[0] > flat_shape[1]:\n raise ValueError('Cannot make orthogonal system because'\n ' # of vectors ({}) is larger than'\n ' that of dimensions ({})'.format(\n flat_shape[0], flat_shape[1]))\n a = numpy.random.normal(size=flat_shape)\n # cupy.linalg.qr requires cusolver in CUDA 8+\n q, r = numpy.linalg.qr(a.T)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n array[...] = xp.asarray(q.T.reshape(array.shape))\n", "path": "chainer/initializers/orthogonal.py"}]} | 1,683 | 774 |
gh_patches_debug_18049 | rasdani/github-patches | git_diff | PrefectHQ__prefect-9390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filter by work pool not filtering the "dot" graph
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
When filtering by "work pool" in the flows screen, the "dots graph" is not filtered.
The "main-pool" is a pool I've just created, and there are no runs associated to it.

### Reproduction
```python3
Create a new "work pool".
Make sure it's empty and has no runs in it.
Go to the flows screen, and filter by this work pool.
You'll see that all the dots on the graph remain although they are not related to that work pool.
```
### Error
_No response_
### Versions
```Text
Prefect cloud
```
### Additional context
_No response_
</issue>
<code>
[start of src/prefect/server/api/ui/flow_runs.py]
1 import datetime
2 from typing import List
3 from uuid import UUID
4
5 from fastapi import Body, Depends
6 from pydantic import Field
7
8 import prefect.server.schemas as schemas
9 from prefect.logging import get_logger
10 from prefect.server import models
11 from prefect.server.database.dependencies import provide_database_interface
12 from prefect.server.database.interface import PrefectDBInterface
13 from prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel
14 from prefect.server.utilities.server import PrefectRouter
15
16 logger = get_logger("server.api.ui.flow_runs")
17
18 router = PrefectRouter(prefix="/ui/flow_runs", tags=["Flow Runs", "UI"])
19
20
21 class SimpleFlowRun(PrefectBaseModel):
22 id: UUID = Field(default=..., description="The flow run id.")
23 state_type: schemas.states.StateType = Field(
24 default=..., description="The state type."
25 )
26 timestamp: DateTimeTZ = Field(
27 default=...,
28 description=(
29 "The start time of the run, or the expected start time "
30 "if it hasn't run yet."
31 ),
32 )
33 duration: datetime.timedelta = Field(
34 default=..., description="The total run time of the run."
35 )
36 lateness: datetime.timedelta = Field(
37 default=..., description="The delay between the expected and actual start time."
38 )
39
40
41 @router.post("/history")
42 async def read_flow_run_history(
43 sort: schemas.sorting.FlowRunSort = Body(
44 schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC
45 ),
46 limit: int = Body(1000, le=1000),
47 offset: int = Body(0, ge=0),
48 flows: schemas.filters.FlowFilter = None,
49 flow_runs: schemas.filters.FlowRunFilter = None,
50 task_runs: schemas.filters.TaskRunFilter = None,
51 deployments: schemas.filters.DeploymentFilter = None,
52 db: PrefectDBInterface = Depends(provide_database_interface),
53 ) -> List[SimpleFlowRun]:
54 columns = [
55 db.FlowRun.id,
56 db.FlowRun.state_type,
57 db.FlowRun.start_time,
58 db.FlowRun.expected_start_time,
59 db.FlowRun.total_run_time,
60 # Although it isn't returned, we need to select
61 # this field in order to compute `estimated_run_time`
62 db.FlowRun.state_timestamp,
63 ]
64 async with db.session_context() as session:
65 result = await models.flow_runs.read_flow_runs(
66 columns=columns,
67 flow_filter=flows,
68 flow_run_filter=flow_runs,
69 task_run_filter=task_runs,
70 deployment_filter=deployments,
71 sort=sort,
72 limit=limit,
73 offset=offset,
74 session=session,
75 )
76 return [
77 SimpleFlowRun(
78 id=r.id,
79 state_type=r.state_type,
80 timestamp=r.start_time or r.expected_start_time,
81 duration=r.estimated_run_time,
82 lateness=r.estimated_start_time_delta,
83 )
84 for r in result
85 ]
86
[end of src/prefect/server/api/ui/flow_runs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py
--- a/src/prefect/server/api/ui/flow_runs.py
+++ b/src/prefect/server/api/ui/flow_runs.py
@@ -49,6 +49,7 @@
flow_runs: schemas.filters.FlowRunFilter = None,
task_runs: schemas.filters.TaskRunFilter = None,
deployments: schemas.filters.DeploymentFilter = None,
+ work_pools: schemas.filters.WorkPoolFilter = None,
db: PrefectDBInterface = Depends(provide_database_interface),
) -> List[SimpleFlowRun]:
columns = [
@@ -68,6 +69,7 @@
flow_run_filter=flow_runs,
task_run_filter=task_runs,
deployment_filter=deployments,
+ work_pool_filter=work_pools,
sort=sort,
limit=limit,
offset=offset,
| {"golden_diff": "diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py\n--- a/src/prefect/server/api/ui/flow_runs.py\n+++ b/src/prefect/server/api/ui/flow_runs.py\n@@ -49,6 +49,7 @@\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n+ work_pools: schemas.filters.WorkPoolFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n ) -> List[SimpleFlowRun]:\n columns = [\n@@ -68,6 +69,7 @@\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n+ work_pool_filter=work_pools,\n sort=sort,\n limit=limit,\n offset=offset,\n", "issue": "Filter by work pool not filtering the \"dot\" graph\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nWhen filtering by \"work pool\" in the flows screen, the \"dots graph\" is not filtered.\r\nThe \"main-pool\" is a pool I've just created, and there are no runs associated to it.\r\n\r\n\r\n\r\n\n\n### Reproduction\n\n```python3\nCreate a new \"work flow\".\r\nMake sure it's empty and has no runs in it.\r\nGo to the flows screen, and filter by this work pool.\r\nYou'll see that all the dots on the graph remain although they are not related to that work pool.\n```\n\n\n### Error\n\n_No response_\n\n### Versions\n\n```Text\nPrefect cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom fastapi import Body, Depends\nfrom pydantic import Field\n\nimport prefect.server.schemas as schemas\nfrom prefect.logging import get_logger\nfrom prefect.server import models\nfrom prefect.server.database.dependencies import provide_database_interface\nfrom prefect.server.database.interface import PrefectDBInterface\nfrom prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel\nfrom prefect.server.utilities.server import PrefectRouter\n\nlogger = get_logger(\"server.api.ui.flow_runs\")\n\nrouter = PrefectRouter(prefix=\"/ui/flow_runs\", tags=[\"Flow Runs\", \"UI\"])\n\n\nclass SimpleFlowRun(PrefectBaseModel):\n id: UUID = Field(default=..., description=\"The flow run id.\")\n state_type: schemas.states.StateType = Field(\n default=..., description=\"The state type.\"\n )\n timestamp: DateTimeTZ = Field(\n default=...,\n description=(\n \"The start time of the run, or the expected start time \"\n \"if it hasn't run yet.\"\n ),\n )\n duration: datetime.timedelta = Field(\n default=..., description=\"The total run time of the run.\"\n )\n lateness: datetime.timedelta = Field(\n default=..., description=\"The delay between the expected and actual start time.\"\n )\n\n\[email protected](\"/history\")\nasync def read_flow_run_history(\n sort: schemas.sorting.FlowRunSort = Body(\n schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC\n ),\n limit: int = Body(1000, le=1000),\n offset: int = Body(0, ge=0),\n flows: schemas.filters.FlowFilter = None,\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[SimpleFlowRun]:\n columns = [\n 
db.FlowRun.id,\n db.FlowRun.state_type,\n db.FlowRun.start_time,\n db.FlowRun.expected_start_time,\n db.FlowRun.total_run_time,\n # Although it isn't returned, we need to select\n # this field in order to compute `estimated_run_time`\n db.FlowRun.state_timestamp,\n ]\n async with db.session_context() as session:\n result = await models.flow_runs.read_flow_runs(\n columns=columns,\n flow_filter=flows,\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n sort=sort,\n limit=limit,\n offset=offset,\n session=session,\n )\n return [\n SimpleFlowRun(\n id=r.id,\n state_type=r.state_type,\n timestamp=r.start_time or r.expected_start_time,\n duration=r.estimated_run_time,\n lateness=r.estimated_start_time_delta,\n )\n for r in result\n ]\n", "path": "src/prefect/server/api/ui/flow_runs.py"}]} | 1,652 | 204 |
gh_patches_debug_4840 | rasdani/github-patches | git_diff | vega__altair-1192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect description of an example
https://altair-viz.github.io/gallery/scatter_linked_brush.html
The title of the page says "Faceted Scatter Plot with Linked Brushing".
But the example is a concatenated view, not a faceted view.
(The data points are shown twice in the visualization, not split by a category.)
</issue>
<code>
[start of altair/vegalite/v2/examples/scatter_linked_brush.py]
1 """
2 Faceted Scatter Plot with Linked Brushing
3 -----------------------------------------
4 This is an example of using an interval selection to control the color of
5 points across multiple facets.
6 """
7 # category: interactive charts
8 import altair as alt
9 from vega_datasets import data
10
11 cars = data.cars()
12
13 brush = alt.selection(type='interval', resolve='global')
14
15 base = alt.Chart(cars).mark_point().encode(
16 y='Miles_per_Gallon',
17 color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))
18 ).add_selection(
19 brush
20 ).properties(
21 width=250,
22 height=250
23 )
24
25 base.encode(x='Horsepower') | base.encode(x='Acceleration')
26
[end of altair/vegalite/v2/examples/scatter_linked_brush.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py
--- a/altair/vegalite/v2/examples/scatter_linked_brush.py
+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py
@@ -1,8 +1,8 @@
"""
-Faceted Scatter Plot with Linked Brushing
------------------------------------------
+Multi-panel Scatter Plot with Linked Brushing
+---------------------------------------------
This is an example of using an interval selection to control the color of
-points across multiple facets.
+points across multiple panels.
"""
# category: interactive charts
import altair as alt
| {"golden_diff": "diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py\n--- a/altair/vegalite/v2/examples/scatter_linked_brush.py\n+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py\n@@ -1,8 +1,8 @@\n \"\"\"\n-Faceted Scatter Plot with Linked Brushing\n------------------------------------------\n+Multi-panel Scatter Plot with Linked Brushing\n+---------------------------------------------\n This is an example of using an interval selection to control the color of\n-points across multiple facets.\n+points across multiple panels.\n \"\"\"\n # category: interactive charts\n import altair as alt\n", "issue": "Incorrect description of an example\nhttps://altair-viz.github.io/gallery/scatter_linked_brush.html\r\n\r\nThe title of the page says \"Faceted Scatter Plot with Linked Brushing\".\r\nBut the example is a concatenated view, not a faceted view. \r\n(The data points are shown twice in the visualization, not split by a category.) \n", "before_files": [{"content": "\"\"\"\nFaceted Scatter Plot with Linked Brushing\n-----------------------------------------\nThis is an example of using an interval selection to control the color of\npoints across multiple facets.\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\n\nbrush = alt.selection(type='interval', resolve='global')\n\nbase = alt.Chart(cars).mark_point().encode(\n y='Miles_per_Gallon',\n color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))\n).add_selection(\n brush\n).properties(\n width=250,\n height=250\n)\n\nbase.encode(x='Horsepower') | base.encode(x='Acceleration')\n", "path": "altair/vegalite/v2/examples/scatter_linked_brush.py"}]} | 822 | 152 |
gh_patches_debug_39588 | rasdani/github-patches | git_diff | google__turbinia-1002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve exception handling in FileSystemTimelineJob
Under certain conditions, dfvfs may throw exceptions that are not currently being handled:
```
dfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.
```
</issue>
<code>
[start of turbinia/workers/file_system_timeline.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2022 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task to run dfimagetools FileEntryLister on disk partitions."""
16
17 from __future__ import unicode_literals
18
19 import os
20
21 from turbinia import TurbiniaException
22 from turbinia.workers import TurbiniaTask
23 from turbinia.evidence import EvidenceState as state
24 from turbinia.evidence import BodyFile
25
26 if TurbiniaTask.check_worker_role():
27 try:
28 from dfvfs.helpers import volume_scanner
29 from dfvfs.lib import errors as dfvfs_errors
30 from dfimagetools import file_entry_lister
31 except ImportError as exception:
32 message = 'Could not import libraries: {0!s}'.format(exception)
33 raise TurbiniaException(message)
34
35
36 class FileSystemTimelineTask(TurbiniaTask):
37
38 REQUIRED_STATES = [state.ATTACHED]
39
40 TASK_CONFIG = {'partitions': ['all']}
41
42 def run(self, evidence, result):
43 """Task to execute (dfimagetools) FileEntryLister.
44
45 Args:
46 evidence (Evidence object): The evidence we will process.
47 result (TurbiniaTaskResult): The object to place task results into.
48
49 Returns:
50 TurbiniaTaskResult object.
51 """
52 bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')
53 output_evidence = BodyFile(source_path=bodyfile_output)
54 number_of_entries = 0
55
56 # Set things up for the FileEntryLister client. We will scan all
57 # partitions in the volume.
58 volume_scanner_options = volume_scanner.VolumeScannerOptions()
59 volume_scanner_options.partitions = self.task_config.get('partitions')
60
61 # Create the FileEntryLister client and generate the path specs
62 # for all available partitions.
63 entry_lister = file_entry_lister.FileEntryLister()
64 base_path_specs = entry_lister.GetBasePathSpecs(
65 evidence.device_path, options=volume_scanner_options)
66
67 # Iterate over all file entries and generate the output in bodyfile
68 # format.
69 try:
70 with open(bodyfile_output, 'w') as file_object:
71 for file_entry, path_segments in entry_lister.ListFileEntries(
72 base_path_specs):
73 bodyfile_entries = entry_lister.GetBodyfileEntries(
74 file_entry, path_segments)
75 for bodyfile_entry in bodyfile_entries:
76 file_object.write(bodyfile_entry)
77 file_object.write('\n')
78 number_of_entries += 1
79 output_evidence.number_of_entries = number_of_entries
80 result.add_evidence(output_evidence, evidence.config)
81 status = 'Generated file system timeline containing [{0:d}] entries'.format(
82 number_of_entries)
83 result.close(self, success=True, status=status)
84 except dfvfs_errors.ScannerError as exception:
85 result.log('Error generating bodyfile {0!s}'.format(exception))
86 status = 'Unable to generate bodyfile using provided evidence data.'
87 result.close(self, success=False, status=status)
88 raise TurbiniaException(
89 'Could not process volume: {0!s}'.format(exception))
90
91 return result
92
[end of turbinia/workers/file_system_timeline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py
--- a/turbinia/workers/file_system_timeline.py
+++ b/turbinia/workers/file_system_timeline.py
@@ -34,6 +34,7 @@
class FileSystemTimelineTask(TurbiniaTask):
+ """Task to generate file system timelines. """
REQUIRED_STATES = [state.ATTACHED]
@@ -61,31 +62,49 @@
# Create the FileEntryLister client and generate the path specs
# for all available partitions.
entry_lister = file_entry_lister.FileEntryLister()
- base_path_specs = entry_lister.GetBasePathSpecs(
- evidence.device_path, options=volume_scanner_options)
+ try:
+ base_path_specs = entry_lister.GetBasePathSpecs(
+ evidence.device_path, options=volume_scanner_options)
+ except dfvfs_errors.ScannerError as exception:
+ status = 'Unable to open evidence: {0!s}'.format(exception)
+ result.close(self, success=False, status=status)
# Iterate over all file entries and generate the output in bodyfile
# format.
try:
- with open(bodyfile_output, 'w') as file_object:
- for file_entry, path_segments in entry_lister.ListFileEntries(
- base_path_specs):
- bodyfile_entries = entry_lister.GetBodyfileEntries(
- file_entry, path_segments)
- for bodyfile_entry in bodyfile_entries:
- file_object.write(bodyfile_entry)
- file_object.write('\n')
- number_of_entries += 1
- output_evidence.number_of_entries = number_of_entries
- result.add_evidence(output_evidence, evidence.config)
- status = 'Generated file system timeline containing [{0:d}] entries'.format(
- number_of_entries)
- result.close(self, success=True, status=status)
- except dfvfs_errors.ScannerError as exception:
- result.log('Error generating bodyfile {0!s}'.format(exception))
- status = 'Unable to generate bodyfile using provided evidence data.'
+ file_entries = None
+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:
+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))
+ while file_entries:
+ try:
+ _, (file_entry, path_segments) = next(file_entries)
+ bodyfile_entries = entry_lister.GetBodyfileEntries(
+ file_entry, path_segments)
+ for bodyfile_entry in bodyfile_entries:
+ file_object.write(bodyfile_entry)
+ file_object.write('\n')
+ number_of_entries += 1
+ except StopIteration:
+ break
+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
+ dfvfs_errors.MountPointError,
+ dfvfs_errors.PathSpecError) as exception:
+ status = 'Unable to process file entry: {0!s}'.format(exception)
+ result.log(status)
+
+ if number_of_entries > 0:
+ output_evidence.number_of_entries = number_of_entries
+ result.add_evidence(output_evidence, evidence.config)
+ status = 'Generated file system timeline containing [{0:d}] entries'.format(
+ number_of_entries)
+ result.close(self, success=True, status=status)
+ else:
+ status = 'Unable to process any file entries.'
+ result.close(self, success=False, status=status)
+
+ except IOError as exception:
+ status = 'Unable to create bodyfile local output file: {0!s}'.format(
+ exception)
result.close(self, success=False, status=status)
- raise TurbiniaException(
- 'Could not process volume: {0!s}'.format(exception))
return result
| {"golden_diff": "diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py\n--- a/turbinia/workers/file_system_timeline.py\n+++ b/turbinia/workers/file_system_timeline.py\n@@ -34,6 +34,7 @@\n \n \n class FileSystemTimelineTask(TurbiniaTask):\n+ \"\"\"Task to generate file system timelines. \"\"\"\n \n REQUIRED_STATES = [state.ATTACHED]\n \n@@ -61,31 +62,49 @@\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n- base_path_specs = entry_lister.GetBasePathSpecs(\n- evidence.device_path, options=volume_scanner_options)\n+ try:\n+ base_path_specs = entry_lister.GetBasePathSpecs(\n+ evidence.device_path, options=volume_scanner_options)\n+ except dfvfs_errors.ScannerError as exception:\n+ status = 'Unable to open evidence: {0!s}'.format(exception)\n+ result.close(self, success=False, status=status)\n \n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n- with open(bodyfile_output, 'w') as file_object:\n- for file_entry, path_segments in entry_lister.ListFileEntries(\n- base_path_specs):\n- bodyfile_entries = entry_lister.GetBodyfileEntries(\n- file_entry, path_segments)\n- for bodyfile_entry in bodyfile_entries:\n- file_object.write(bodyfile_entry)\n- file_object.write('\\n')\n- number_of_entries += 1\n- output_evidence.number_of_entries = number_of_entries\n- result.add_evidence(output_evidence, evidence.config)\n- status = 'Generated file system timeline containing [{0:d}] entries'.format(\n- number_of_entries)\n- result.close(self, success=True, status=status)\n- except dfvfs_errors.ScannerError as exception:\n- result.log('Error generating bodyfile {0!s}'.format(exception))\n- status = 'Unable to generate bodyfile using provided evidence data.'\n+ file_entries = None\n+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:\n+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))\n+ while file_entries:\n+ try:\n+ _, (file_entry, path_segments) = next(file_entries)\n+ bodyfile_entries = entry_lister.GetBodyfileEntries(\n+ file_entry, path_segments)\n+ for bodyfile_entry in bodyfile_entries:\n+ file_object.write(bodyfile_entry)\n+ file_object.write('\\n')\n+ number_of_entries += 1\n+ except StopIteration:\n+ break\n+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,\n+ dfvfs_errors.MountPointError,\n+ dfvfs_errors.PathSpecError) as exception:\n+ status = 'Unable to process file entry: {0!s}'.format(exception)\n+ result.log(status)\n+\n+ if number_of_entries > 0:\n+ output_evidence.number_of_entries = number_of_entries\n+ result.add_evidence(output_evidence, evidence.config)\n+ status = 'Generated file system timeline containing [{0:d}] entries'.format(\n+ number_of_entries)\n+ result.close(self, success=True, status=status)\n+ else:\n+ status = 'Unable to process any file entries.'\n+ result.close(self, success=False, status=status)\n+\n+ except IOError as exception:\n+ status = 'Unable to create bodyfile local output file: {0!s}'.format(\n+ exception)\n result.close(self, success=False, status=status)\n- raise TurbiniaException(\n- 'Could not process volume: {0!s}'.format(exception))\n \n return result\n", "issue": "Improve exception handling in FileSystemTimelineJob\nUnder certain conditions, dfvfs may throw exceptions that are not currently being handked:\r\n\r\n```\r\ndfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: 
unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to run dfimagetools FileEntryLister on disk partitions.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom turbinia import TurbiniaException\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import BodyFile\n\nif TurbiniaTask.check_worker_role():\n try:\n from dfvfs.helpers import volume_scanner\n from dfvfs.lib import errors as dfvfs_errors\n from dfimagetools import file_entry_lister\n except ImportError as exception:\n message = 'Could not import libraries: {0!s}'.format(exception)\n raise TurbiniaException(message)\n\n\nclass FileSystemTimelineTask(TurbiniaTask):\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {'partitions': ['all']}\n\n def run(self, evidence, result):\n \"\"\"Task to execute (dfimagetools) FileEntryLister.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')\n output_evidence = BodyFile(source_path=bodyfile_output)\n number_of_entries = 0\n\n # Set things up for the FileEntryLister client. 
We will scan all\n # partitions in the volume.\n volume_scanner_options = volume_scanner.VolumeScannerOptions()\n volume_scanner_options.partitions = self.task_config.get('partitions')\n\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n base_path_specs = entry_lister.GetBasePathSpecs(\n evidence.device_path, options=volume_scanner_options)\n\n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n with open(bodyfile_output, 'w') as file_object:\n for file_entry, path_segments in entry_lister.ListFileEntries(\n base_path_specs):\n bodyfile_entries = entry_lister.GetBodyfileEntries(\n file_entry, path_segments)\n for bodyfile_entry in bodyfile_entries:\n file_object.write(bodyfile_entry)\n file_object.write('\\n')\n number_of_entries += 1\n output_evidence.number_of_entries = number_of_entries\n result.add_evidence(output_evidence, evidence.config)\n status = 'Generated file system timeline containing [{0:d}] entries'.format(\n number_of_entries)\n result.close(self, success=True, status=status)\n except dfvfs_errors.ScannerError as exception:\n result.log('Error generating bodyfile {0!s}'.format(exception))\n status = 'Unable to generate bodyfile using provided evidence data.'\n result.close(self, success=False, status=status)\n raise TurbiniaException(\n 'Could not process volume: {0!s}'.format(exception))\n\n return result\n", "path": "turbinia/workers/file_system_timeline.py"}]} | 1,715 | 850 |
gh_patches_debug_24183 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6460 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tifffile deprecation warning on test_tifffile.py
## Description
<!--
(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)
-->
## Way to reproduce
```python
run pytest on skimage/io/tests/test_tifffile.py
```
Will show deprecation warning issues
```bash
skimage/io/tests/test_tifffile.py ................................... [100%]
=============================== warnings summary ===============================
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
```
</issue>
<code>
[start of skimage/io/_plugins/tifffile_plugin.py]
1 __all__ = ['imread', 'imsave']
2
3 from tifffile import imwrite as imsave, imread as tifffile_imread
4
5
6 def imread(fname, **kwargs):
7 """Load a tiff image from file.
8
9 Parameters
10 ----------
11 fname : str or file
12 File name or file-like-object.
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by the tifffile library [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] https://pypi.org/project/tifffile/
25
26 """
27 if 'img_num' in kwargs:
28 kwargs['key'] = kwargs.pop('img_num')
29
30 return tifffile_imread(fname, **kwargs)
31
[end of skimage/io/_plugins/tifffile_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -1,6 +1,50 @@
+from tifffile import imread as tifffile_imread
+from tifffile import imwrite as tifffile_imwrite
+
__all__ = ['imread', 'imsave']
-from tifffile import imwrite as imsave, imread as tifffile_imread
+
+def imsave(fname, arr, **kwargs):
+ """Load a tiff image to file.
+
+ Parameters
+ ----------
+ fname : str or file
+ File name or file-like object.
+ arr : ndarray
+ The array to write.
+ kwargs : keyword pairs, optional
+ Additional keyword arguments to pass through (see ``tifffile``'s
+ ``imwrite`` function).
+
+ Notes
+ -----
+ Provided by the tifffile library [1]_, and supports many
+ advanced image types including multi-page and floating-point.
+
+ This implementation will set ``photometric='RGB'`` when writing if the first
+ or last axis of `arr` has length 3 or 4. To override this, explicitly
+ pass the ``photometric`` kwarg.
+
+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the
+ first axis of arr has length 3 or 4. To override this, explicitly
+ specify the ``planarconfig`` kwarg.
+
+ References
+ ----------
+ .. [1] https://pypi.org/project/tifffile/
+
+ """
+ if arr.shape[0] in [3, 4]:
+ if 'planarconfig' not in kwargs:
+ kwargs['planarconfig'] = 'SEPARATE'
+ rgb = True
+ else:
+ rgb = arr.shape[-1] in [3, 4]
+ if rgb and 'photometric' not in kwargs:
+ kwargs['photometric'] = 'RGB'
+
+ return tifffile_imwrite(fname, arr, **kwargs)
def imread(fname, **kwargs):
| {"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,6 +1,50 @@\n+from tifffile import imread as tifffile_imread\n+from tifffile import imwrite as tifffile_imwrite\n+\n __all__ = ['imread', 'imsave']\n \n-from tifffile import imwrite as imsave, imread as tifffile_imread\n+\n+def imsave(fname, arr, **kwargs):\n+ \"\"\"Load a tiff image to file.\n+\n+ Parameters\n+ ----------\n+ fname : str or file\n+ File name or file-like object.\n+ arr : ndarray\n+ The array to write.\n+ kwargs : keyword pairs, optional\n+ Additional keyword arguments to pass through (see ``tifffile``'s\n+ ``imwrite`` function).\n+\n+ Notes\n+ -----\n+ Provided by the tifffile library [1]_, and supports many\n+ advanced image types including multi-page and floating-point.\n+\n+ This implementation will set ``photometric='RGB'`` when writing if the first\n+ or last axis of `arr` has length 3 or 4. To override this, explicitly\n+ pass the ``photometric`` kwarg.\n+\n+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the\n+ first axis of arr has length 3 or 4. To override this, explicitly\n+ specify the ``planarconfig`` kwarg.\n+\n+ References\n+ ----------\n+ .. [1] https://pypi.org/project/tifffile/\n+\n+ \"\"\"\n+ if arr.shape[0] in [3, 4]:\n+ if 'planarconfig' not in kwargs:\n+ kwargs['planarconfig'] = 'SEPARATE'\n+ rgb = True\n+ else:\n+ rgb = arr.shape[-1] in [3, 4]\n+ if rgb and 'photometric' not in kwargs:\n+ kwargs['photometric'] = 'RGB'\n+\n+ return tifffile_imwrite(fname, arr, **kwargs)\n \n \n def imread(fname, **kwargs):\n", "issue": "tifffile deprecation warning on test_tifffile.py\n## Description\r\n\r\n<!--\r\n(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)\r\n-->\r\n\r\n## Way to reproduce\r\n```python\r\nrun pytest on skimage/io/tests/test_tifffile.py\r\n```\r\nWill show deprecation warning issues\r\n```bash\r\nskimage/io/tests/test_tifffile.py ................................... [100%]\r\n\r\n=============================== warnings summary ===============================\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. 
Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nfrom tifffile import imwrite as imsave, imread as tifffile_imread\n\n\ndef imread(fname, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n return tifffile_imread(fname, **kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]} | 1,934 | 527 |
gh_patches_debug_34329 | rasdani/github-patches | git_diff | wagtail__wagtail-1417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documents: This backend doesn't support absolute paths. (Update to 1.0b2)
## 1. Bug since Wagtail update
I recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access a document I uploaded via a `wagtaildocs.Document` field, I get the following error:
```
NotImplementedError at /documents/3/headphones.svg
This backend doesn't support absolute paths.
```
The field is specified as:
```
svg_mask = models.ForeignKey(
verbose_name=u"Mask (SVG)",
to='wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
```
Is this a wagtail bug or a configuration error?
## 2. Unexpected behaviour
I would like to serve the svg as `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:
```
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
```
Yet (in 1.0b1) it always got served as a stream. I couldn't test it in 1.0b2 yet, because of the above problem.
Thank you in advance.
</issue>
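The `NotImplementedError` in part 1 is what Django's storage API raises when a backend has no local filesystem path to hand back, so any code that touches `doc.file.path` will break on remote storages such as S3. Below is a minimal sketch of one way to cope — streaming straight from the storage backend and guessing the content type from the filename, which would also cover the `image/svg+xml` expectation in part 2. The `serve_from_storage` helper and its wiring are illustrative only, not Wagtail's actual view.

```python
import mimetypes
from wsgiref.util import FileWrapper

from django.http import StreamingHttpResponse


def serve_from_storage(doc):
    """Stream a wagtaildocs Document without relying on a local file path."""
    # Fall back to a generic type if the filename gives no hint; an .svg name resolves
    # to image/svg+xml once the mimetypes registrations from settings have run.
    content_type = mimetypes.guess_type(doc.filename)[0] or "application/octet-stream"
    # FileWrapper iterates over the storage backend's file object in chunks.
    response = StreamingHttpResponse(FileWrapper(doc.file), content_type=content_type)
    # A Content-Disposition: attachment header could be added here if forced
    # downloads are preferred over inline rendering.
    return response
```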
<code>
[start of wagtail/wagtaildocs/views/serve.py]
1 from django.shortcuts import get_object_or_404
2 from django.conf import settings
3
4 from wagtail.utils.sendfile import sendfile
5 from wagtail.utils import sendfile_streaming_backend
6
7 from wagtail.wagtaildocs.models import Document, document_served
8
9
10 def serve(request, document_id, document_filename):
11 doc = get_object_or_404(Document, id=document_id)
12
13 # Send document_served signal
14 document_served.send(sender=Document, instance=doc, request=request)
15
16 if hasattr(settings, 'SENDFILE_BACKEND'):
17 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
18 else:
19 # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
20 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
21
[end of wagtail/wagtaildocs/views/serve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py
--- a/wagtail/wagtaildocs/views/serve.py
+++ b/wagtail/wagtaildocs/views/serve.py
@@ -1,5 +1,9 @@
from django.shortcuts import get_object_or_404
from django.conf import settings
+from django.http import StreamingHttpResponse, BadHeaderError
+
+from unidecode import unidecode
+from wsgiref.util import FileWrapper
from wagtail.utils.sendfile import sendfile
from wagtail.utils import sendfile_streaming_backend
@@ -13,8 +17,40 @@
# Send document_served signal
document_served.send(sender=Document, instance=doc, request=request)
- if hasattr(settings, 'SENDFILE_BACKEND'):
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
+ try:
+ local_path = doc.file.path
+ except NotImplementedError:
+ local_path = None
+
+ if local_path:
+
+ # Use wagtail.utils.sendfile to serve the file;
+ # this provides support for mimetypes, if-modified-since and django-sendfile backends
+
+ if hasattr(settings, 'SENDFILE_BACKEND'):
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)
+ else:
+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
else:
- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
+ # We are using a storage backend which does not expose filesystem paths
+ # (e.g. storages.backends.s3boto.S3BotoStorage).
+ # Fall back on pre-sendfile behaviour of reading the file content and serving it
+ # as a StreamingHttpResponse
+
+ wrapper = FileWrapper(doc.file)
+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')
+
+ try:
+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename
+ except BadHeaderError:
+ # Unicode filenames can fail on Django <1.8, Python 2 due to
+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name
+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)
+
+ # FIXME: storage backends are not guaranteed to implement 'size'
+ response['Content-Length'] = doc.file.size
+
+ return response
| {"golden_diff": "diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py\n--- a/wagtail/wagtaildocs/views/serve.py\n+++ b/wagtail/wagtaildocs/views/serve.py\n@@ -1,5 +1,9 @@\n from django.shortcuts import get_object_or_404\n from django.conf import settings\n+from django.http import StreamingHttpResponse, BadHeaderError\n+\n+from unidecode import unidecode\n+from wsgiref.util import FileWrapper\n \n from wagtail.utils.sendfile import sendfile\n from wagtail.utils import sendfile_streaming_backend\n@@ -13,8 +17,40 @@\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n \n- if hasattr(settings, 'SENDFILE_BACKEND'):\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n+ try:\n+ local_path = doc.file.path\n+ except NotImplementedError:\n+ local_path = None\n+\n+ if local_path:\n+\n+ # Use wagtail.utils.sendfile to serve the file;\n+ # this provides support for mimetypes, if-modified-since and django-sendfile backends\n+\n+ if hasattr(settings, 'SENDFILE_BACKEND'):\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)\n+ else:\n+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n else:\n- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n+ # We are using a storage backend which does not expose filesystem paths\n+ # (e.g. storages.backends.s3boto.S3BotoStorage).\n+ # Fall back on pre-sendfile behaviour of reading the file content and serving it\n+ # as a StreamingHttpResponse\n+\n+ wrapper = FileWrapper(doc.file)\n+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')\n+\n+ try:\n+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename\n+ except BadHeaderError:\n+ # Unicode filenames can fail on Django <1.8, Python 2 due to\n+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name\n+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)\n+\n+ # FIXME: storage backends are not guaranteed to implement 'size'\n+ response['Content-Length'] = doc.file.size\n+\n+ return response\n", "issue": "Documents: This backend doesn't support absolute paths. (Update to 1.0b2)\n## 1. Bug since Wagtail update\n\nI recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access I document I uploaded via a `wagtaildocs.Document` field, I get the following error:\n\n```\nNotImplementedError at /documents/3/headphones.svg\nThis backend doesn't support absolute paths.\n```\n\nThe field is specified as:\n\n```\nsvg_mask = models.ForeignKey(\n verbose_name=u\"Mask (SVG)\",\n to='wagtaildocs.Document',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n)\n```\n\nIs this a wagtail bug or a configuration error?\n## 2. Unexpected behaviour\n\nI would like to serve the svg as a `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:\n\n```\nmimetypes.add_type(\"image/svg+xml\", \".svg\", True)\nmimetypes.add_type(\"image/svg+xml\", \".svgz\", True)\n```\n\nYet (in 1.0b1) it always got served as a stream. 
I couldn't test it in 1.0b2 yet, because of the above problem.\n\nThank you in advance.\n\n", "before_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom django.conf import settings\n\nfrom wagtail.utils.sendfile import sendfile\nfrom wagtail.utils import sendfile_streaming_backend\n\nfrom wagtail.wagtaildocs.models import Document, document_served\n\n\ndef serve(request, document_id, document_filename):\n doc = get_object_or_404(Document, id=document_id)\n\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n\n if hasattr(settings, 'SENDFILE_BACKEND'):\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n else:\n # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n", "path": "wagtail/wagtaildocs/views/serve.py"}]} | 1,061 | 638 |
gh_patches_debug_4929 | rasdani/github-patches | git_diff | carpentries__amy-1283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add search by github handle to API persons endpoint
Would it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`
</issue>
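With django-filter, which the existing API filters already use, exposing an extra lookup is usually just a matter of listing the model field. The sketch below is illustrative rather than the project's code: `PersonGithubFilter` is a made-up name, and it assumes the `Person` model imported from `workshops.models` carries a `github` field.

```python
from django_filters import rest_framework as filters

from workshops.models import Person


class PersonGithubFilter(filters.FilterSet):
    """Expose ?github=<handle> (exact match) alongside the existing person lookups."""

    class Meta:
        model = Person
        fields = ("username", "email", "github")
```

A client could then query `/api/v1/persons/?github=fmichonneau` as requested.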
<code>
[start of api/filters.py]
1 from django_filters import rest_framework as filters
2
3 from workshops.filters import AMYFilterSet
4 from workshops.models import Event, Task, Tag, Person, Badge
5
6
7 def filter_tag_by_name(queryset, name, values):
8 tags = Tag.objects.filter(name__in=values)
9 for tag in tags:
10 queryset = queryset.filter(tags=tag)
11 return queryset
12
13
14 class EventFilter(filters.FilterSet):
15 start_after = filters.DateFilter(name='start', lookup_expr='gte')
16 start_before = filters.DateFilter(name='start', lookup_expr='lte')
17 end_after = filters.DateFilter(name='end', lookup_expr='gte')
18 end_before = filters.DateFilter(name='end', lookup_expr='lte')
19 TAG_CHOICES = Tag.objects.all().values_list('name', 'name')
20 tag = filters.MultipleChoiceFilter(
21 choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,
22 )
23
24 class Meta:
25 model = Event
26 fields = (
27 'completed', 'tag',
28 'start', 'start_before', 'start_after',
29 'end', 'end_before', 'end_after',
30 )
31 order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')
32
33
34 class TaskFilter(filters.FilterSet):
35 role = filters.CharFilter(name='role__name')
36
37 class Meta:
38 model = Task
39 fields = (
40 'role',
41 )
42
43
44 def filter_instructors(queryset, name, value):
45 instructor_badges = Badge.objects.instructor_badges()
46 if value is True:
47 return queryset.filter(badges__in=instructor_badges)
48 elif value is False:
49 return queryset.exclude(badges__in=instructor_badges)
50 else:
51 return queryset
52
53
54 class PersonFilter(filters.FilterSet):
55 is_instructor = filters.BooleanFilter(method=filter_instructors,
56 label='Is instructor?')
57
58 class Meta:
59 model = Person
60 fields = (
61 'badges', 'username', 'personal', 'middle', 'family', 'email',
62 'may_contact', 'publish_profile',
63 )
64 order_by = (
65 'lastname', '-lastname', 'firstname', '-firstname', 'email',
66 '-email',
67 )
68
69 def get_order_by(self, order_value):
70 if order_value == 'firstname':
71 return ['personal', 'middle', 'family']
72 elif order_value == '-firstname':
73 return ['-personal', '-middle', '-family']
74 elif order_value == 'lastname':
75 return ['family', 'middle', 'personal']
76 elif order_value == '-lastname':
77 return ['-family', '-middle', '-personal']
78 return super().get_order_by(order_value)
79
80
81 class InstructorsOverTimeFilter(AMYFilterSet):
82 badges = filters.ModelMultipleChoiceFilter(
83 queryset=Badge.objects.instructor_badges(),
84 label='Badges',
85 lookup_expr='in',
86 )
87
88 class Meta:
89 model = Person
90 fields = [
91 'badges',
92 ]
93
94
95 class WorkshopsOverTimeFilter(AMYFilterSet):
96 tags = filters.ModelMultipleChoiceFilter(
97 queryset=Tag.objects.all(),
98 label='Events with at least one of the following tags:',
99 )
100
101 class Meta:
102 model = Event
103 fields = [
104 'tags',
105 ]
106
107
108 class LearnersOverTimeFilter(AMYFilterSet):
109 tags = filters.ModelMultipleChoiceFilter(
110 queryset=Tag.objects.all(),
111 label='Events with all the following tags:',
112 conjoined=True,
113 )
114
115 class Meta:
116 model = Event
117 fields = [
118 'tags',
119 ]
120
[end of api/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/filters.py b/api/filters.py
--- a/api/filters.py
+++ b/api/filters.py
@@ -59,7 +59,7 @@
model = Person
fields = (
'badges', 'username', 'personal', 'middle', 'family', 'email',
- 'may_contact', 'publish_profile',
+ 'may_contact', 'publish_profile', 'github',
)
order_by = (
'lastname', '-lastname', 'firstname', '-firstname', 'email',
| {"golden_diff": "diff --git a/api/filters.py b/api/filters.py\n--- a/api/filters.py\n+++ b/api/filters.py\n@@ -59,7 +59,7 @@\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n- 'may_contact', 'publish_profile',\n+ 'may_contact', 'publish_profile', 'github',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n", "issue": "Add search by github handle to API persons endpoint\nWould it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`\n", "before_files": [{"content": "from django_filters import rest_framework as filters\n\nfrom workshops.filters import AMYFilterSet\nfrom workshops.models import Event, Task, Tag, Person, Badge\n\n\ndef filter_tag_by_name(queryset, name, values):\n tags = Tag.objects.filter(name__in=values)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n return queryset\n\n\nclass EventFilter(filters.FilterSet):\n start_after = filters.DateFilter(name='start', lookup_expr='gte')\n start_before = filters.DateFilter(name='start', lookup_expr='lte')\n end_after = filters.DateFilter(name='end', lookup_expr='gte')\n end_before = filters.DateFilter(name='end', lookup_expr='lte')\n TAG_CHOICES = Tag.objects.all().values_list('name', 'name')\n tag = filters.MultipleChoiceFilter(\n choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,\n )\n\n class Meta:\n model = Event\n fields = (\n 'completed', 'tag',\n 'start', 'start_before', 'start_after',\n 'end', 'end_before', 'end_after',\n )\n order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')\n\n\nclass TaskFilter(filters.FilterSet):\n role = filters.CharFilter(name='role__name')\n\n class Meta:\n model = Task\n fields = (\n 'role',\n )\n\n\ndef filter_instructors(queryset, name, value):\n instructor_badges = Badge.objects.instructor_badges()\n if value is True:\n return queryset.filter(badges__in=instructor_badges)\n elif value is False:\n return queryset.exclude(badges__in=instructor_badges)\n else:\n return queryset\n\n\nclass PersonFilter(filters.FilterSet):\n is_instructor = filters.BooleanFilter(method=filter_instructors,\n label='Is instructor?')\n\n class Meta:\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n 'may_contact', 'publish_profile',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n '-email',\n )\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass InstructorsOverTimeFilter(AMYFilterSet):\n badges = filters.ModelMultipleChoiceFilter(\n queryset=Badge.objects.instructor_badges(),\n label='Badges',\n lookup_expr='in',\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n\n\nclass WorkshopsOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with at least one of the following tags:',\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n\n\nclass LearnersOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with 
all the following tags:',\n conjoined=True,\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n", "path": "api/filters.py"}]} | 1,616 | 116 |
gh_patches_debug_14474 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make logout button on AI:MMO
</issue>
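A logout control needs a server-side endpoint before any button can point at it. A minimal sketch in the same style as the existing login route is shown below; the `'/'` redirect target and the URL name are placeholders, and it relies on Django's bundled `auth_views.logout` rather than anything AI:MMO-specific.

```python
from django.conf.urls import url
from django.contrib.auth import views as auth_views

urlpatterns = [
    # Existing login route, kept for context.
    url(r'^accounts/login/$', auth_views.login),
    # Logging the user out, then sending them back to the home page.
    url(r'^accounts/logout/$', auth_views.logout, {'next_page': '/'}, name='aimmo/logout'),
]
```

A template button or link can then simply reverse the `aimmo/logout` URL name.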
<code>
[start of players/urls.py]
1 from django.conf.urls import url
2 from django.contrib.auth import views as auth_views
3 from django.contrib.auth.decorators import login_required
4 from django.views.generic import TemplateView
5 from django.views.generic import RedirectView
6
7 from players import views
8
9 urlpatterns = [
10 url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
11
12 url(r'^accounts/login/$', auth_views.login),
13
14 url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
15 url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
16 url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),
17 url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),
18 url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),
19 url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),
20
21 url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),
22 url(r'^api/games/$', views.list_games, name='aimmo/games'),
23 url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),
24 url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),
25 url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),
26
27 url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
28 url(r'^games/new/$', views.add_game, name='aimmo/new_game'),
29
30 # TODO: this is a quickfix for redirecting for the Unity resources
31 url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',
32 RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),
33
34 url(r'^socket.io/socket.io.js',
35 RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),
36 ]
37
[end of players/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/players/urls.py b/players/urls.py
--- a/players/urls.py
+++ b/players/urls.py
@@ -10,6 +10,8 @@
url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
url(r'^accounts/login/$', auth_views.login),
+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),
+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),
url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
| {"golden_diff": "diff --git a/players/urls.py b/players/urls.py\n--- a/players/urls.py\n+++ b/players/urls.py\n@@ -10,6 +10,8 @@\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n \n url(r'^accounts/login/$', auth_views.login),\n+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),\n+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),\n \n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n", "issue": "Make logout button on AI:MMO\n\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom django.views.generic import RedirectView\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^accounts/login/$', auth_views.login),\n\n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),\n url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),\n\n url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.list_games, name='aimmo/games'),\n url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),\n url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),\n url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n url(r'^games/new/$', views.add_game, name='aimmo/new_game'),\n\n # TODO: this is a quickfix for redirecting for the Unity resources\n url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',\n RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),\n\n url(r'^socket.io/socket.io.js',\n RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),\n]\n", "path": "players/urls.py"}]} | 1,163 | 198 |
gh_patches_debug_173 | rasdani/github-patches | git_diff | Parsl__parsl-2038 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parsl v1.1.0 Release Checklist
## Checklist
Please edit the checklist if I've missed any items.
### Documentation updates :
- [x] Update docs to point at 1.1.0 as the latest
- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.
- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]
- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes
- [x] Add `Beta` tags to components/features that are not yet stable.
### Testing :
- [ ] All testing should be green on Travis
- [x] Update all configs in `parsl/parsl/configs` to match current best practices
- [x] Update all test configs in `parsl/parsl/test/configs`
- [x] Test notebooks/tutorials and basic tests on a Mac
- [ ] Post news update on the website about release
- [x] Site testing:
- [x] Bridges2(PSC) [YY]
- [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse
- [x] Cori (NERSC) [YY/Yadu]
- [x] Stampede2 (TACC) [Yadu]
- [ ] ~~Frontera (TACC)~~ [Yadu, no access]
- [x] Theta (ALCF) [YY]
- [x] Bluewaters (NCSA) [ZZ]
- [x] Summit (ORNL) [Yadu]
- [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]
- [x] Midway (RCC, UChicago) [YY]
- [x] Open Science Grid
- [x] AWS
- [x] Kubernetes [ZZ]
- [x] NSCC Singapore [ZZ]
- [ ] Ad-Hoc clusters [YY]
### Release Tagging and pushing to PyPI
I'll make an updated alpha to smoothen installation and site testing.
</issue>
<code>
[start of parsl/version.py]
1 """Set module version.
2
3 <Major>.<Minor>.<maintenance>[alpha/beta/..]
4 Alphas will be numbered like this -> 0.4.0a0
5 """
6 VERSION = '1.1.0a1'
7
[end of parsl/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/version.py b/parsl/version.py
--- a/parsl/version.py
+++ b/parsl/version.py
@@ -3,4 +3,4 @@
<Major>.<Minor>.<maintenance>[alpha/beta/..]
Alphas will be numbered like this -> 0.4.0a0
"""
-VERSION = '1.1.0a1'
+VERSION = '1.1.0'
| {"golden_diff": "diff --git a/parsl/version.py b/parsl/version.py\n--- a/parsl/version.py\n+++ b/parsl/version.py\n@@ -3,4 +3,4 @@\n <Major>.<Minor>.<maintenance>[alpha/beta/..]\n Alphas will be numbered like this -> 0.4.0a0\n \"\"\"\n-VERSION = '1.1.0a1'\n+VERSION = '1.1.0'\n", "issue": "Parsl v1.1.0 Release Checklist\n## Checklist\r\n\r\nPlease edit the checklist if I've missed any items. \r\n\r\n### Documentation updates :\r\n\r\n- [x] Update docs to point at 1.1.0 as the latest\r\n- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.\r\n- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]\r\n- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes \r\n- [x] Add `Beta` tags to components/features that are not yet stable.\r\n\r\n\r\n### Testing :\r\n\r\n- [ ] All testing should be green on Travis\r\n- [x] Update all configs in `parsl/parsl/configs` to match current best practices\r\n- [x] Update all test configs in `parsl/parsl/test/configs`\r\n- [x] Test notebooks/tutorials and basic tests on a Mac\r\n- [ ] Post news update on the website about release\r\n\r\n- [x] Site testing:\r\n - [x] Bridges2(PSC) [YY]\r\n - [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse\r\n - [x] Cori (NERSC) [YY/Yadu]\r\n - [x] Stampede2 (TACC) [Yadu]\r\n - [ ] ~~Frontera (TACC)~~ [Yadu, no access]\r\n - [x] Theta (ALCF) [YY]\r\n - [x] Bluewaters (NCSA) [ZZ]\r\n - [x] Summit (ORNL) [Yadu]\r\n - [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]\r\n - [x] Midway (RCC, UChicago) [YY]\r\n - [x] Open Science Grid\r\n - [x] AWS\r\n - [x] Kubernetes [ZZ]\r\n - [x] NSCC Singapore [ZZ]\r\n - [ ] Ad-Hoc clusters [YY]\r\n\r\n### Release Tagging and pushing to PyPI\r\n\r\nI'll make an updated alpha to smoothen installation and site testing.\r\n\n", "before_files": [{"content": "\"\"\"Set module version.\n\n<Major>.<Minor>.<maintenance>[alpha/beta/..]\nAlphas will be numbered like this -> 0.4.0a0\n\"\"\"\nVERSION = '1.1.0a1'\n", "path": "parsl/version.py"}]} | 1,075 | 98 |
gh_patches_debug_6931 | rasdani/github-patches | git_diff | zulip__zulip-28775 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve integrations documentation in help center
We should:
- Rename /help/bots-and-integrations to "Bots overview" everywhere (sidebar, page title, page URL).
- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled "Integration overview"
- Cross-link as appropriate, both in related articles and in the content of the pages.
</issue>
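For the rename itself, old help-center links keep working through the redirect table shown in the code listing below. A sketch of the kind of entry involved follows; the new slug `/help/bots-overview` is inferred from the requested "Bots overview" title.

```python
from zerver.lib.url_redirects import URLRedirect

# Old URL for the renamed "Bots overview" article pointing at its new slug.
bots_overview_redirect = URLRedirect(
    old_url="/help/bots-and-integrations",
    new_url="/help/bots-overview",
)
```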
<code>
[start of zerver/lib/url_redirects.py]
1 from dataclasses import dataclass
2 from typing import List
3
4
5 @dataclass
6 class URLRedirect:
7 old_url: str
8 new_url: str
9
10
11 API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
12 # Add URL redirects for REST API documentation here:
13 URLRedirect("/api/delete-stream", "/api/archive-stream"),
14 ]
15
16 POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
17 # Add URL redirects for policy documentation here:
18 URLRedirect("/privacy/", "/policies/privacy"),
19 URLRedirect("/terms/", "/policies/terms"),
20 ]
21
22 HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
23 # Add URL redirects for help center documentation here:
24 URLRedirect("/help/pm-mention-alert-notifications", "/help/dm-mention-alert-notifications"),
25 URLRedirect("/help/restrict-private-messages", "/help/restrict-direct-messages"),
26 URLRedirect("/help/reading-pms", "/help/reading-dms"),
27 URLRedirect("/help/private-messages", "/help/direct-messages"),
28 URLRedirect("/help/configure-who-can-edit-topics", "/help/restrict-moving-messages"),
29 URLRedirect(
30 "/help/configure-message-editing-and-deletion",
31 "/help/restrict-message-editing-and-deletion",
32 ),
33 URLRedirect("/help/restrict-visibility-of-email-addresses", "/help/configure-email-visibility"),
34 URLRedirect("/help/change-default-view", "/help/configure-default-view"),
35 URLRedirect("/help/recent-topics", "/help/recent-conversations"),
36 URLRedirect(
37 "/help/add-custom-profile-fields",
38 "/help/custom-profile-fields",
39 ),
40 URLRedirect(
41 "/help/enable-enter-to-send",
42 "/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
43 ),
44 URLRedirect(
45 "/help/change-the-default-language-for-your-organization",
46 "/help/configure-organization-language",
47 ),
48 URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
49 URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
50 URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
51 URLRedirect("/help/add-an-alert-word", "/help/dm-mention-alert-notifications#alert-words"),
52 URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
53 URLRedirect(
54 "/help/troubleshooting-desktop-notifications",
55 "/help/desktop-notifications#troubleshooting-desktop-notifications",
56 ),
57 URLRedirect(
58 "/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
59 ),
60 URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
61 URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
62 # The `help/about-streams-and-topics` redirect is particularly important,
63 # because the old URL appears in links from Welcome Bot messages.
64 URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
65 URLRedirect("/help/community-topic-edits", "/help/restrict-moving-messages"),
66 URLRedirect(
67 "/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
68 ),
69 URLRedirect(
70 "/help/configure-who-can-add-custom-emoji",
71 "/help/custom-emoji#change-who-can-add-custom-emoji",
72 ),
73 URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
74 URLRedirect("/help/night-mode", "/help/dark-theme"),
75 URLRedirect("/help/enable-emoticon-translations", "/help/configure-emoticon-translations"),
76 URLRedirect("/help/web-public-streams", "/help/public-access-option"),
77 URLRedirect("/help/starting-a-new-private-thread", "/help/starting-a-new-direct-message"),
78 URLRedirect("/help/edit-or-delete-a-message", "/help/delete-a-message"),
79 URLRedirect("/help/start-a-new-topic", "/help/starting-a-new-topic"),
80 URLRedirect("/help/configure-default-view", "/help/configure-home-view"),
81 URLRedirect("/help/reading-topics", "/help/reading-conversations"),
82 URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
83 URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
84 ]
85
86 LANDING_PAGE_REDIRECTS = [
87 # Add URL redirects for corporate landing pages here.
88 URLRedirect("/new-user/", "/hello/"),
89 URLRedirect("/developer-community/", "/development-community"),
90 URLRedirect("/for/companies/", "/for/business"),
91 URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
92 ]
93
94 DOCUMENTATION_REDIRECTS = (
95 API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
96 )
97
[end of zerver/lib/url_redirects.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -81,6 +81,7 @@
URLRedirect("/help/reading-topics", "/help/reading-conversations"),
URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
+ URLRedirect("/help/bots-and-integrations", "/help/bots-overview"),
]
LANDING_PAGE_REDIRECTS = [
| {"golden_diff": "diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py\n--- a/zerver/lib/url_redirects.py\n+++ b/zerver/lib/url_redirects.py\n@@ -81,6 +81,7 @@\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n+ URLRedirect(\"/help/bots-and-integrations\", \"/help/bots-overview\"),\n ]\n \n LANDING_PAGE_REDIRECTS = [\n", "issue": "Improve integrations documentation in help center\nWe should:\r\n- Rename /help/bots-and-integrations to \"Bots overview\" everywhere (sidebar, page title, page URL).\r\n- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled \"Integration overview\"\r\n- Cross-link as appropriate, both in related articles and in the content of the pages.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n \"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", 
\"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", \"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}]} | 1,853 | 140 |
gh_patches_debug_1008 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-4892 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
**Bug description**
Please enter a clear and concise description of what the bug is.
When I execute:
```sh
$python -m parlai eval_model --task fromfile:parlaiformat\
--fromfile_datapath "${test_set_path}" \
-mf zoo:saferdialogues/model\
-bs 1\
--world-logs $test_set_path.SafeRDialog_parlai.jsonl\
--no-cuda
```
It reports:
```sh
16:13:53 | Overriding opt["task"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)
16:13:53 | Overriding opt["no_cuda"] to True (previously: False)
>>>using / style agent path
>>>finally module name: parlai.agents.transformer.generator
16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict
16:13:53 | num words = 8008
Traceback (most recent call last):
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 18, in <module>
main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 14, in main
superscript_main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 325, in superscript_main
return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 108, in _run_from_parser_and_opt
return script.run()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 265, in run
return eval_model(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 233, in eval_model
agent = create_agent(opt, requireModelExists=True)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 468, in create_agent
model = create_agent_from_opt_file(opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 421, in create_agent_from_opt_file
return model_class(opt_from_file)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py", line 462, in __init__
super().__init__(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 783, in __init__
self.dict = self.build_dictionary()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 862, in build_dictionary
d = self.dictionary_class()(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py", line 322, in __init__
self.bpe = bpe_factory(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 68, in bpe_factory
bpe_helper = HuggingFaceBpeHelper(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 841, in __init__
raise IOError(
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
```
and the parlai version is: `1.6.0`.
**Reproduction steps**
Enter steps to reproduce the behavior.
**Expected behavior**
Give a clear and concise description of what you expected to happen.
**Logs**
Please paste the command line output:
```
Output goes here
```
**Additional context**
Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
</issue>
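The traceback boils down to the model's saved options still pointing `--bpe-vocab` at an absolute `/checkpoint/...` path from the original training machine. One way to inspect — and, as a stopgap, re-point — those entries is to edit the downloaded `model.opt` JSON. The directory layout, the `bpe_vocab`/`bpe_merge` keys, and the local file names below are assumptions drawn from the traceback; the proper fix may simply be a repackaged zoo model.

```python
import json
from pathlib import Path

# Adjust to wherever ParlAI placed the zoo model (see the dictionary path in the traceback).
model_dir = Path("/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/"
                 "site-packages/data/models/saferdialogues")
opt_path = model_dir / "model.opt"

opt = json.loads(opt_path.read_text())
print(opt.get("bpe_vocab"))  # stale /checkpoint/... path from the training cluster
print(opt.get("bpe_merge"))

# Re-point the BPE files at local copies shipped with the model, then retry eval_model.
opt["bpe_vocab"] = str(model_dir / "model.dict-vocab.json")
opt["bpe_merge"] = str(model_dir / "model.dict-merges.txt")
opt_path.write_text(json.dumps(opt, indent=2))
```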
<code>
[start of parlai/zoo/saferdialogues/build.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 """
8 Blender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.
9 """
10
11 from parlai.core.build_data import download_models
12
13
14 def download(datapath):
15 opt = {'datapath': datapath}
16 version = 'v0.1'
17 fnames = [f'models_{version}.tar.gz']
18 download_models(
19 opt,
20 fnames,
21 model_folder='saferdialogues',
22 version=version,
23 use_model_type=False,
24 )
25
[end of parlai/zoo/saferdialogues/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py
--- a/parlai/zoo/saferdialogues/build.py
+++ b/parlai/zoo/saferdialogues/build.py
@@ -13,7 +13,7 @@
def download(datapath):
opt = {'datapath': datapath}
- version = 'v0.1'
+ version = 'v0.2'
fnames = [f'models_{version}.tar.gz']
download_models(
opt,
| {"golden_diff": "diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py\n--- a/parlai/zoo/saferdialogues/build.py\n+++ b/parlai/zoo/saferdialogues/build.py\n@@ -13,7 +13,7 @@\n \n def download(datapath):\n opt = {'datapath': datapath}\n- version = 'v0.1'\n+ version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n", "issue": "OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\n**Bug description**\r\n\r\nPlease enter a clear and concise description of what the bug is.\r\n\r\nWhen I execute:\r\n```sh\r\n$python -m parlai eval_model --task fromfile:parlaiformat\\\r\n --fromfile_datapath \"${test_set_path}\" \\\r\n -mf zoo:saferdialogues/model\\\r\n -bs 1\\\r\n --world-logs $test_set_path.SafeRDialog_parlai.jsonl\\\r\n --no-cuda\r\n```\r\nIt report:\r\n\r\n```sh\r\n16:13:53 | Overriding opt[\"task\"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)\r\n16:13:53 | Overriding opt[\"no_cuda\"] to True (previously: False)\r\n>>>using / style agent path\r\n>>>finally module name: parlai.agents.transformer.generator\r\n16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict\r\n16:13:53 | num words = 8008\r\nTraceback (most recent call last):\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 18, in <module>\r\n main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 14, in main\r\n superscript_main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 325, in superscript_main\r\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 108, in _run_from_parser_and_opt\r\n return script.run()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 265, in run\r\n return eval_model(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 233, in eval_model\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 468, in create_agent\r\n model = create_agent_from_opt_file(opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 421, in create_agent_from_opt_file\r\n return model_class(opt_from_file)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py\", line 462, in __init__\r\n super().__init__(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 783, in __init__\r\n self.dict = self.build_dictionary()\r\n 
File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 862, in build_dictionary\r\n d = self.dictionary_class()(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py\", line 322, in __init__\r\n self.bpe = bpe_factory(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 68, in bpe_factory\r\n bpe_helper = HuggingFaceBpeHelper(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 841, in __init__\r\n raise IOError(\r\nOSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\r\n```\r\n\r\nand the parlai version is: `1.6.0`.\r\n\r\n**Reproduction steps**\r\nEnter steps to reproduce the behavior.\r\n\r\n**Expected behavior**\r\nGive a clear and concise description of what you expected to happen.\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nOutput goes here\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.1'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py"}]} | 2,033 | 136 |
gh_patches_debug_17633 | rasdani/github-patches | git_diff | nextcloud__appstore-523 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
App Generator creates faulty info.xml when using umlauts (special characters äöü)
Problem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**
## Details
* Browser and browser version: Firefox 55.0.3
## Steps to reproduce
Steps to reproduce:
- visit https://apps.nextcloud.com/developer/apps/generate
- enter the required information; in summary and description enter "This is a test app äöü"
- click generate and download.
- look at the generated info.xml
</issue>
<code>
[start of nextcloudappstore/scaffolding/archive.py]
1 import re
2 import tarfile
3 from io import BytesIO, StringIO
4 from typing import Dict
5 from os.path import join, isdir, relpath
6 from os import walk
7
8 from django.template import Context
9 from django.template import Template
10
11 from nextcloudappstore.core.facades import resolve_file_relative_path
12 from nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES
13
14
15 def build_files(args: Dict[str, str]) -> Dict[str, str]:
16 platform = int(args['platform']) # prevent path traversal
17 vars = {
18 'id': args['name'].lower(),
19 'summary': args['summary'],
20 'description': args['description'],
21 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),
22 'namespace': args['name'],
23 'author_name': args['author_name'],
24 'author_mail': args['author_email'],
25 'author_homepage': args['author_homepage'],
26 'issue_tracker': args['issue_tracker'],
27 'categories': args['categories'],
28 'nextcloud_version': platform
29 }
30 vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))
31 relative_base = 'app-templates/%i/app/' % platform
32 base = resolve_file_relative_path(__file__, relative_base)
33
34 context = Context({'app': vars})
35 result = {}
36 if isdir(base):
37 for root, dirs, files in walk(base):
38 for file in files:
39 file_path = join(root, file)
40 rel_file_path = '%s/%s' % (
41 vars['id'], relpath(file_path, base)
42 )
43 with open(file_path) as f:
44 t = Template(f.read())
45 result[rel_file_path] = t.render(context)
46
47 return result
48
49
50 def build_archive(parameters: Dict[str, str]) -> BytesIO:
51 buffer = BytesIO()
52 with tarfile.open(fileobj=buffer, mode='w:gz') as f:
53 files = build_files(parameters)
54 for path, contents in files.items():
55 info = tarfile.TarInfo(path)
56 info.size = len(contents)
57 f.addfile(info, BytesIO(contents.encode()))
58 buffer.seek(0)
59 return buffer
60
[end of nextcloudappstore/scaffolding/archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py
--- a/nextcloudappstore/scaffolding/archive.py
+++ b/nextcloudappstore/scaffolding/archive.py
@@ -1,9 +1,9 @@
import re
import tarfile
-from io import BytesIO, StringIO
-from typing import Dict
-from os.path import join, isdir, relpath
+from io import BytesIO
from os import walk
+from os.path import join, isdir, relpath
+from typing import Dict
from django.template import Context
from django.template import Template
@@ -53,7 +53,8 @@
files = build_files(parameters)
for path, contents in files.items():
info = tarfile.TarInfo(path)
- info.size = len(contents)
- f.addfile(info, BytesIO(contents.encode()))
+ encoded_content = contents.encode()
+ info.size = len(encoded_content)
+ f.addfile(info, BytesIO(encoded_content))
buffer.seek(0)
return buffer
| {"golden_diff": "diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py\n--- a/nextcloudappstore/scaffolding/archive.py\n+++ b/nextcloudappstore/scaffolding/archive.py\n@@ -1,9 +1,9 @@\n import re\n import tarfile\n-from io import BytesIO, StringIO\n-from typing import Dict\n-from os.path import join, isdir, relpath\n+from io import BytesIO\n from os import walk\n+from os.path import join, isdir, relpath\n+from typing import Dict\n \n from django.template import Context\n from django.template import Template\n@@ -53,7 +53,8 @@\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n- info.size = len(contents)\n- f.addfile(info, BytesIO(contents.encode()))\n+ encoded_content = contents.encode()\n+ info.size = len(encoded_content)\n+ f.addfile(info, BytesIO(encoded_content))\n buffer.seek(0)\n return buffer\n", "issue": "App Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\nApp Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\n", "before_files": [{"content": "import re\nimport tarfile\nfrom io import BytesIO, StringIO\nfrom typing import Dict\nfrom os.path import join, isdir, relpath\nfrom os import walk\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = 
t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n info.size = len(contents)\n f.addfile(info, BytesIO(contents.encode()))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/scaffolding/archive.py"}]} | 1,380 | 235 |
gh_patches_debug_41254 | rasdani/github-patches | git_diff | Pylons__pyramid-3029 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove request.exception if the excview tween fails to handle the exception
Pyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.
I'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a "normal" response.
</issue>
<code>
[start of pyramid/tweens.py]
1 import sys
2
3 from pyramid.compat import reraise
4 from pyramid.exceptions import PredicateMismatch
5 from pyramid.interfaces import (
6 IExceptionViewClassifier,
7 IRequest,
8 )
9
10 from zope.interface import providedBy
11 from pyramid.view import _call_view
12
13 def excview_tween_factory(handler, registry):
14 """ A :term:`tween` factory which produces a tween that catches an
15 exception raised by downstream tweens (or the main Pyramid request
16 handler) and, if possible, converts it into a Response using an
17 :term:`exception view`."""
18
19 def excview_tween(request):
20 attrs = request.__dict__
21 try:
22 response = handler(request)
23 except Exception as exc:
24 # WARNING: do not assign the result of sys.exc_info() to a local
25 # var here, doing so will cause a leak. We used to actually
26 # explicitly delete both "exception" and "exc_info" from ``attrs``
27 # in a ``finally:`` clause below, but now we do not because these
28 # attributes are useful to upstream tweens. This actually still
29 # apparently causes a reference cycle, but it is broken
30 # successfully by the garbage collector (see
31 # https://github.com/Pylons/pyramid/issues/1223).
32 attrs['exc_info'] = sys.exc_info()
33 attrs['exception'] = exc
34 # clear old generated request.response, if any; it may
35 # have been mutated by the view, and its state is not
36 # sane (e.g. caching headers)
37 if 'response' in attrs:
38 del attrs['response']
39 # we use .get instead of .__getitem__ below due to
40 # https://github.com/Pylons/pyramid/issues/700
41 request_iface = attrs.get('request_iface', IRequest)
42 provides = providedBy(exc)
43 try:
44 response = _call_view(
45 registry,
46 request,
47 exc,
48 provides,
49 '',
50 view_classifier=IExceptionViewClassifier,
51 request_iface=request_iface.combined
52 )
53
54 # if views matched but did not pass predicates, squash the error
55 # and re-raise the original exception
56 except PredicateMismatch:
57 response = None
58
59 # re-raise the original exception as no exception views were
60 # able to handle the error
61 if response is None:
62 reraise(*attrs['exc_info'])
63
64 return response
65
66 return excview_tween
67
68 MAIN = 'MAIN'
69 INGRESS = 'INGRESS'
70 EXCVIEW = 'pyramid.tweens.excview_tween_factory'
71
[end of pyramid/tweens.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/tweens.py b/pyramid/tweens.py
--- a/pyramid/tweens.py
+++ b/pyramid/tweens.py
@@ -10,6 +10,50 @@
from zope.interface import providedBy
from pyramid.view import _call_view
+def _error_handler(request, exc):
+ # NOTE: we do not need to delete exc_info because this function
+ # should never be in the call stack of the exception
+ exc_info = sys.exc_info()
+
+ attrs = request.__dict__
+ attrs['exc_info'] = exc_info
+ attrs['exception'] = exc
+ # clear old generated request.response, if any; it may
+ # have been mutated by the view, and its state is not
+ # sane (e.g. caching headers)
+ if 'response' in attrs:
+ del attrs['response']
+ # we use .get instead of .__getitem__ below due to
+ # https://github.com/Pylons/pyramid/issues/700
+ request_iface = attrs.get('request_iface', IRequest)
+ provides = providedBy(exc)
+ try:
+ response = _call_view(
+ request.registry,
+ request,
+ exc,
+ provides,
+ '',
+ view_classifier=IExceptionViewClassifier,
+ request_iface=request_iface.combined
+ )
+
+ # if views matched but did not pass predicates then treat the
+ # same as not finding any matching views
+ except PredicateMismatch:
+ response = None
+
+ # re-raise the original exception as no exception views were
+ # able to handle the error
+ if response is None:
+ if 'exception' in attrs:
+ del attrs['exception']
+ if 'exc_info' in attrs:
+ del attrs['exc_info']
+ reraise(*exc_info)
+
+ return response
+
def excview_tween_factory(handler, registry):
""" A :term:`tween` factory which produces a tween that catches an
exception raised by downstream tweens (or the main Pyramid request
@@ -17,50 +61,10 @@
:term:`exception view`."""
def excview_tween(request):
- attrs = request.__dict__
try:
response = handler(request)
except Exception as exc:
- # WARNING: do not assign the result of sys.exc_info() to a local
- # var here, doing so will cause a leak. We used to actually
- # explicitly delete both "exception" and "exc_info" from ``attrs``
- # in a ``finally:`` clause below, but now we do not because these
- # attributes are useful to upstream tweens. This actually still
- # apparently causes a reference cycle, but it is broken
- # successfully by the garbage collector (see
- # https://github.com/Pylons/pyramid/issues/1223).
- attrs['exc_info'] = sys.exc_info()
- attrs['exception'] = exc
- # clear old generated request.response, if any; it may
- # have been mutated by the view, and its state is not
- # sane (e.g. caching headers)
- if 'response' in attrs:
- del attrs['response']
- # we use .get instead of .__getitem__ below due to
- # https://github.com/Pylons/pyramid/issues/700
- request_iface = attrs.get('request_iface', IRequest)
- provides = providedBy(exc)
- try:
- response = _call_view(
- registry,
- request,
- exc,
- provides,
- '',
- view_classifier=IExceptionViewClassifier,
- request_iface=request_iface.combined
- )
-
- # if views matched but did not pass predicates, squash the error
- # and re-raise the original exception
- except PredicateMismatch:
- response = None
-
- # re-raise the original exception as no exception views were
- # able to handle the error
- if response is None:
- reraise(*attrs['exc_info'])
-
+ response = _error_handler(request, exc)
return response
return excview_tween
| {"golden_diff": "diff --git a/pyramid/tweens.py b/pyramid/tweens.py\n--- a/pyramid/tweens.py\n+++ b/pyramid/tweens.py\n@@ -10,6 +10,50 @@\n from zope.interface import providedBy\n from pyramid.view import _call_view\n \n+def _error_handler(request, exc):\n+ # NOTE: we do not need to delete exc_info because this function\n+ # should never be in the call stack of the exception\n+ exc_info = sys.exc_info()\n+\n+ attrs = request.__dict__\n+ attrs['exc_info'] = exc_info\n+ attrs['exception'] = exc\n+ # clear old generated request.response, if any; it may\n+ # have been mutated by the view, and its state is not\n+ # sane (e.g. caching headers)\n+ if 'response' in attrs:\n+ del attrs['response']\n+ # we use .get instead of .__getitem__ below due to\n+ # https://github.com/Pylons/pyramid/issues/700\n+ request_iface = attrs.get('request_iface', IRequest)\n+ provides = providedBy(exc)\n+ try:\n+ response = _call_view(\n+ request.registry,\n+ request,\n+ exc,\n+ provides,\n+ '',\n+ view_classifier=IExceptionViewClassifier,\n+ request_iface=request_iface.combined\n+ )\n+\n+ # if views matched but did not pass predicates then treat the\n+ # same as not finding any matching views\n+ except PredicateMismatch:\n+ response = None\n+\n+ # re-raise the original exception as no exception views were\n+ # able to handle the error\n+ if response is None:\n+ if 'exception' in attrs:\n+ del attrs['exception']\n+ if 'exc_info' in attrs:\n+ del attrs['exc_info']\n+ reraise(*exc_info)\n+\n+ return response\n+\n def excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n@@ -17,50 +61,10 @@\n :term:`exception view`.\"\"\"\n \n def excview_tween(request):\n- attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n- # WARNING: do not assign the result of sys.exc_info() to a local\n- # var here, doing so will cause a leak. We used to actually\n- # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n- # in a ``finally:`` clause below, but now we do not because these\n- # attributes are useful to upstream tweens. This actually still\n- # apparently causes a reference cycle, but it is broken\n- # successfully by the garbage collector (see\n- # https://github.com/Pylons/pyramid/issues/1223).\n- attrs['exc_info'] = sys.exc_info()\n- attrs['exception'] = exc\n- # clear old generated request.response, if any; it may\n- # have been mutated by the view, and its state is not\n- # sane (e.g. 
caching headers)\n- if 'response' in attrs:\n- del attrs['response']\n- # we use .get instead of .__getitem__ below due to\n- # https://github.com/Pylons/pyramid/issues/700\n- request_iface = attrs.get('request_iface', IRequest)\n- provides = providedBy(exc)\n- try:\n- response = _call_view(\n- registry,\n- request,\n- exc,\n- provides,\n- '',\n- view_classifier=IExceptionViewClassifier,\n- request_iface=request_iface.combined\n- )\n-\n- # if views matched but did not pass predicates, squash the error\n- # and re-raise the original exception\n- except PredicateMismatch:\n- response = None\n-\n- # re-raise the original exception as no exception views were\n- # able to handle the error\n- if response is None:\n- reraise(*attrs['exc_info'])\n-\n+ response = _error_handler(request, exc)\n return response\n \n return excview_tween\n", "issue": "remove request.exception if the excview tween fails to handle the exception\nPyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.\r\n\r\nI'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a \"normal\" response.\n", "before_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. 
caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates, squash the error\n # and re-raise the original exception\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n reraise(*attrs['exc_info'])\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}]} | 1,501 | 961 |
gh_patches_debug_668 | rasdani/github-patches | git_diff | liqd__a4-opin-388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
timeline wrong way?
the phases in the timeline seem to be sorted in the wrong direction:


</issue>
<code>
[start of euth/phases/models.py]
1 from django.core.exceptions import ValidationError
2 from django.db import models
3 from django.utils import timezone
4 from django.utils.translation import ugettext as _
5
6 from euth.modules import models as modules_models
7
8 from . import content
9 from .validators import validate_content
10
11
12 class PhasesQuerySet(models.QuerySet):
13
14 def active_phases(self):
15 now = timezone.now()
16 return self.filter(start_date__lte=now, end_date__gt=now)
17
18
19 class Phase(models.Model):
20 name = models.CharField(max_length=80)
21 description = models.TextField(max_length=300)
22 type = models.CharField(max_length=128, validators=[validate_content])
23 module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)
24 start_date = models.DateTimeField(blank=True, null=True)
25 end_date = models.DateTimeField(blank=True, null=True)
26
27 objects = PhasesQuerySet.as_manager()
28
29 def __str__(self):
30 return '{} ({})'.format(self.name, self.type)
31
32 def content(self):
33 return content[self.type]
34
35 def clean(self):
36 if self.end_date and self.start_date:
37 if self.end_date < self.start_date:
38 raise ValidationError({
39 'end_date': _('End date can not be smaller'
40 'than the start date.')
41 })
42 super().clean()
43
44 @property
45 def view(self):
46 return content[self.type].view
47
48 def has_feature(self, feature, model):
49 return content[self.type].has_feature(feature, model)
50
[end of euth/phases/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/phases/models.py b/euth/phases/models.py
--- a/euth/phases/models.py
+++ b/euth/phases/models.py
@@ -26,6 +26,9 @@
objects = PhasesQuerySet.as_manager()
+ class Meta:
+ ordering = ['type']
+
def __str__(self):
return '{} ({})'.format(self.name, self.type)
| {"golden_diff": "diff --git a/euth/phases/models.py b/euth/phases/models.py\n--- a/euth/phases/models.py\n+++ b/euth/phases/models.py\n@@ -26,6 +26,9 @@\n \n objects = PhasesQuerySet.as_manager()\n \n+ class Meta:\n+ ordering = ['type']\n+\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n", "issue": "timeline wrong way?\nthe phases in the timeline seem to be sorted in the wrong direction:\n\n\n\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom euth.modules import models as modules_models\n\nfrom . import content\nfrom .validators import validate_content\n\n\nclass PhasesQuerySet(models.QuerySet):\n\n def active_phases(self):\n now = timezone.now()\n return self.filter(start_date__lte=now, end_date__gt=now)\n\n\nclass Phase(models.Model):\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=300)\n type = models.CharField(max_length=128, validators=[validate_content])\n module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n objects = PhasesQuerySet.as_manager()\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n\n def content(self):\n return content[self.type]\n\n def clean(self):\n if self.end_date and self.start_date:\n if self.end_date < self.start_date:\n raise ValidationError({\n 'end_date': _('End date can not be smaller'\n 'than the start date.')\n })\n super().clean()\n\n @property\n def view(self):\n return content[self.type].view\n\n def has_feature(self, feature, model):\n return content[self.type].has_feature(feature, model)\n", "path": "euth/phases/models.py"}]} | 1,141 | 94 |
gh_patches_debug_29172 | rasdani/github-patches | git_diff | goauthentik__authentik-4876 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixed light/dark theme setting
Hello, is it possible to set the dark and light theme fixed? With the OS autodetection I have otherwise problems with my different logos that I use in my Tenants settings. Light logos are not visible to users on light platforms, dark logos are not visible to users on dark platforms.
Thanks in advanced
pupazze
</issue>
<code>
[start of authentik/tenants/api.py]
1 """Serializer for tenant models"""
2 from typing import Any
3
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.decorators import action
6 from rest_framework.exceptions import ValidationError
7 from rest_framework.fields import CharField, ListField
8 from rest_framework.filters import OrderingFilter, SearchFilter
9 from rest_framework.permissions import AllowAny
10 from rest_framework.request import Request
11 from rest_framework.response import Response
12 from rest_framework.serializers import ModelSerializer
13 from rest_framework.viewsets import ModelViewSet
14
15 from authentik.api.authorization import SecretKeyFilter
16 from authentik.core.api.used_by import UsedByMixin
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.lib.config import CONFIG
19 from authentik.tenants.models import Tenant
20
21
22 class FooterLinkSerializer(PassiveSerializer):
23 """Links returned in Config API"""
24
25 href = CharField(read_only=True)
26 name = CharField(read_only=True)
27
28
29 class TenantSerializer(ModelSerializer):
30 """Tenant Serializer"""
31
32 def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
33 if attrs.get("default", False):
34 tenants = Tenant.objects.filter(default=True)
35 if self.instance:
36 tenants = tenants.exclude(pk=self.instance.pk)
37 if tenants.exists():
38 raise ValidationError("Only a single Tenant can be set as default.")
39 return super().validate(attrs)
40
41 class Meta:
42 model = Tenant
43 fields = [
44 "tenant_uuid",
45 "domain",
46 "default",
47 "branding_title",
48 "branding_logo",
49 "branding_favicon",
50 "flow_authentication",
51 "flow_invalidation",
52 "flow_recovery",
53 "flow_unenrollment",
54 "flow_user_settings",
55 "flow_device_code",
56 "event_retention",
57 "web_certificate",
58 "attributes",
59 ]
60
61
62 class CurrentTenantSerializer(PassiveSerializer):
63 """Partial tenant information for styling"""
64
65 matched_domain = CharField(source="domain")
66 branding_title = CharField()
67 branding_logo = CharField()
68 branding_favicon = CharField()
69 ui_footer_links = ListField(
70 child=FooterLinkSerializer(),
71 read_only=True,
72 default=CONFIG.y("footer_links", []),
73 )
74
75 flow_authentication = CharField(source="flow_authentication.slug", required=False)
76 flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
77 flow_recovery = CharField(source="flow_recovery.slug", required=False)
78 flow_unenrollment = CharField(source="flow_unenrollment.slug", required=False)
79 flow_user_settings = CharField(source="flow_user_settings.slug", required=False)
80 flow_device_code = CharField(source="flow_device_code.slug", required=False)
81
82 default_locale = CharField(read_only=True)
83
84
85 class TenantViewSet(UsedByMixin, ModelViewSet):
86 """Tenant Viewset"""
87
88 queryset = Tenant.objects.all()
89 serializer_class = TenantSerializer
90 search_fields = [
91 "domain",
92 "branding_title",
93 "web_certificate__name",
94 ]
95 filterset_fields = [
96 "tenant_uuid",
97 "domain",
98 "default",
99 "branding_title",
100 "branding_logo",
101 "branding_favicon",
102 "flow_authentication",
103 "flow_invalidation",
104 "flow_recovery",
105 "flow_unenrollment",
106 "flow_user_settings",
107 "flow_device_code",
108 "event_retention",
109 "web_certificate",
110 ]
111 ordering = ["domain"]
112
113 filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]
114
115 @extend_schema(
116 responses=CurrentTenantSerializer(many=False),
117 )
118 @action(methods=["GET"], detail=False, permission_classes=[AllowAny])
119 def current(self, request: Request) -> Response:
120 """Get current tenant"""
121 tenant: Tenant = request._request.tenant
122 return Response(CurrentTenantSerializer(tenant).data)
123
[end of authentik/tenants/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py
--- a/authentik/tenants/api.py
+++ b/authentik/tenants/api.py
@@ -1,10 +1,11 @@
"""Serializer for tenant models"""
from typing import Any
+from django.db import models
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
-from rest_framework.fields import CharField, ListField
+from rest_framework.fields import CharField, ChoiceField, ListField
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import AllowAny
from rest_framework.request import Request
@@ -59,6 +60,14 @@
]
+class Themes(models.TextChoices):
+ """Themes"""
+
+ AUTOMATIC = "automatic"
+ LIGHT = "light"
+ DARK = "dark"
+
+
class CurrentTenantSerializer(PassiveSerializer):
"""Partial tenant information for styling"""
@@ -71,6 +80,12 @@
read_only=True,
default=CONFIG.y("footer_links", []),
)
+ ui_theme = ChoiceField(
+ choices=Themes.choices,
+ source="attributes.settings.theme.base",
+ default=Themes.AUTOMATIC,
+ read_only=True,
+ )
flow_authentication = CharField(source="flow_authentication.slug", required=False)
flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
| {"golden_diff": "diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py\n--- a/authentik/tenants/api.py\n+++ b/authentik/tenants/api.py\n@@ -1,10 +1,11 @@\n \"\"\"Serializer for tenant models\"\"\"\n from typing import Any\n \n+from django.db import models\n from drf_spectacular.utils import extend_schema\n from rest_framework.decorators import action\n from rest_framework.exceptions import ValidationError\n-from rest_framework.fields import CharField, ListField\n+from rest_framework.fields import CharField, ChoiceField, ListField\n from rest_framework.filters import OrderingFilter, SearchFilter\n from rest_framework.permissions import AllowAny\n from rest_framework.request import Request\n@@ -59,6 +60,14 @@\n ]\n \n \n+class Themes(models.TextChoices):\n+ \"\"\"Themes\"\"\"\n+\n+ AUTOMATIC = \"automatic\"\n+ LIGHT = \"light\"\n+ DARK = \"dark\"\n+\n+\n class CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n \n@@ -71,6 +80,12 @@\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n+ ui_theme = ChoiceField(\n+ choices=Themes.choices,\n+ source=\"attributes.settings.theme.base\",\n+ default=Themes.AUTOMATIC,\n+ read_only=True,\n+ )\n \n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n", "issue": "Fixed light/dark theme setting\nHello, is it possible to set the dark and light theme fixed? With the OS autodetection I have otherwise problems with my different logos that I use in my Tenants settings. Light logos are not visible to users on light platforms, dark logos are not visible to users on dark platforms.\r\nThanks in advanced\r\npupazze\n", "before_files": [{"content": "\"\"\"Serializer for tenant models\"\"\"\nfrom typing import Any\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import CharField, ListField\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.api.authorization import SecretKeyFilter\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.config import CONFIG\nfrom authentik.tenants.models import Tenant\n\n\nclass FooterLinkSerializer(PassiveSerializer):\n \"\"\"Links returned in Config API\"\"\"\n\n href = CharField(read_only=True)\n name = CharField(read_only=True)\n\n\nclass TenantSerializer(ModelSerializer):\n \"\"\"Tenant Serializer\"\"\"\n\n def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:\n if attrs.get(\"default\", False):\n tenants = Tenant.objects.filter(default=True)\n if self.instance:\n tenants = tenants.exclude(pk=self.instance.pk)\n if tenants.exists():\n raise ValidationError(\"Only a single Tenant can be set as default.\")\n return super().validate(attrs)\n\n class Meta:\n model = Tenant\n fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n 
\"web_certificate\",\n \"attributes\",\n ]\n\n\nclass CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n\n matched_domain = CharField(source=\"domain\")\n branding_title = CharField()\n branding_logo = CharField()\n branding_favicon = CharField()\n ui_footer_links = ListField(\n child=FooterLinkSerializer(),\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n\n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n flow_recovery = CharField(source=\"flow_recovery.slug\", required=False)\n flow_unenrollment = CharField(source=\"flow_unenrollment.slug\", required=False)\n flow_user_settings = CharField(source=\"flow_user_settings.slug\", required=False)\n flow_device_code = CharField(source=\"flow_device_code.slug\", required=False)\n\n default_locale = CharField(read_only=True)\n\n\nclass TenantViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Tenant Viewset\"\"\"\n\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n search_fields = [\n \"domain\",\n \"branding_title\",\n \"web_certificate__name\",\n ]\n filterset_fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n ]\n ordering = [\"domain\"]\n\n filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]\n\n @extend_schema(\n responses=CurrentTenantSerializer(many=False),\n )\n @action(methods=[\"GET\"], detail=False, permission_classes=[AllowAny])\n def current(self, request: Request) -> Response:\n \"\"\"Get current tenant\"\"\"\n tenant: Tenant = request._request.tenant\n return Response(CurrentTenantSerializer(tenant).data)\n", "path": "authentik/tenants/api.py"}]} | 1,698 | 328 |
gh_patches_debug_10131 | rasdani/github-patches | git_diff | pre-commit__pre-commit-578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nodeenv try to download non existing tar.gz prebuilt under Cygwin
Hi,
Strange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.
Bug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0
`pre-commit` execute the following command under the hood, a command that also fails if I execute it manually:
```
nodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default
```
The error is the following:
```
urllib2.HTTPError: HTTP Error 404: Not Found
```
The `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503
I'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint at what the root cause could be here ?
</issue>
<code>
[start of pre_commit/languages/node.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 from pre_commit.envcontext import envcontext
8 from pre_commit.envcontext import Var
9 from pre_commit.languages import helpers
10 from pre_commit.util import clean_path_on_failure
11 from pre_commit.xargs import xargs
12
13
14 ENVIRONMENT_DIR = 'node_env'
15 get_default_version = helpers.basic_get_default_version
16 healthy = helpers.basic_healthy
17
18
19 def get_env_patch(venv): # pragma: windows no cover
20 return (
21 ('NODE_VIRTUAL_ENV', venv),
22 ('NPM_CONFIG_PREFIX', venv),
23 ('npm_config_prefix', venv),
24 ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
25 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
26 )
27
28
29 @contextlib.contextmanager
30 def in_env(repo_cmd_runner, language_version): # pragma: windows no cover
31 envdir = repo_cmd_runner.path(
32 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
33 )
34 with envcontext(get_env_patch(envdir)):
35 yield
36
37
38 def install_environment(
39 repo_cmd_runner, version, additional_dependencies,
40 ): # pragma: windows no cover
41 additional_dependencies = tuple(additional_dependencies)
42 assert repo_cmd_runner.exists('package.json')
43 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
44
45 env_dir = repo_cmd_runner.path(directory)
46 with clean_path_on_failure(env_dir):
47 cmd = [
48 sys.executable, '-m', 'nodeenv', '--prebuilt',
49 '{{prefix}}{}'.format(directory),
50 ]
51
52 if version != 'default':
53 cmd.extend(['-n', version])
54
55 repo_cmd_runner.run(cmd)
56
57 with in_env(repo_cmd_runner, version):
58 helpers.run_setup_cmd(
59 repo_cmd_runner,
60 ('npm', 'install', '-g', '.') + additional_dependencies,
61 )
62
63
64 def run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover
65 with in_env(repo_cmd_runner, hook['language_version']):
66 return xargs(helpers.to_cmd(hook), file_args)
67
[end of pre_commit/languages/node.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -17,10 +17,11 @@
def get_env_patch(venv): # pragma: windows no cover
+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv
return (
('NODE_VIRTUAL_ENV', venv),
- ('NPM_CONFIG_PREFIX', venv),
- ('npm_config_prefix', venv),
+ ('NPM_CONFIG_PREFIX', config),
+ ('npm_config_prefix', config),
('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
)
| {"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -17,10 +17,11 @@\n \n \n def get_env_patch(venv): # pragma: windows no cover\n+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n- ('NPM_CONFIG_PREFIX', venv),\n- ('npm_config_prefix', venv),\n+ ('NPM_CONFIG_PREFIX', config),\n+ ('npm_config_prefix', config),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n", "issue": "nodeenv try to download non existing tar.gz prebuilt under Cygwin\nHi,\r\n\r\nStrange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.\r\n\r\nBug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0\r\n\r\n`pre-commit` execute the following command under the hood, a command that also fails if I execute it manually:\r\n```\r\nnodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default\r\n```\r\nThe error is the following:\r\n```\r\nurllib2.HTTPError: HTTP Error 404: Not Found\r\n```\r\nThe `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503\r\n\r\nI'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint at what the root cause could be here ?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): # pragma: windows no cover\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', venv),\n ('npm_config_prefix', venv),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version): # pragma: windows no cover\n envdir = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n repo_cmd_runner, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert repo_cmd_runner.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = repo_cmd_runner.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [\n sys.executable, '-m', 'nodeenv', '--prebuilt',\n '{{prefix}}{}'.format(directory),\n ]\n\n if version != 'default':\n cmd.extend(['-n', version])\n\n repo_cmd_runner.run(cmd)\n\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n 
with in_env(repo_cmd_runner, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}]} | 1,441 | 192 |
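
The patch in the row above boils down to one platform check: on Cygwin, npm's prefix has to point at the environment's `bin` directory rather than at the environment root. Below is a minimal, standalone sketch of that check; the helper name `npm_prefix` and the sample path are illustrative only and are not part of pre-commit's API.

```python
import os
import sys


def npm_prefix(venv):
    """Return the value used for NPM_CONFIG_PREFIX / npm_config_prefix.

    Mirrors the diff above: on Cygwin the prefix is <venv>/bin, on every
    other platform the environment root is used unchanged.
    """
    return os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv


if __name__ == '__main__':
    # Illustrative value only; pre-commit computes the real path itself.
    print(npm_prefix('/home/user/.pre-commit/repo/node_env-default'))
```
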
gh_patches_debug_1913 | rasdani/github-patches | git_diff | falconry__falcon-1946 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate falcon.api_helpers
See https://github.com/falconry/falcon/issues/1902.
Starting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.
</issue>
<code>
[start of falcon/api_helpers.py]
1 from .app_helpers import * # NOQA
2
3 # TODO deprecate
4 # import warnings
5 # from .util.deprecation import DeprecatedWarning
6
7 # warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
8
[end of falcon/api_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py
--- a/falcon/api_helpers.py
+++ b/falcon/api_helpers.py
@@ -1,7 +1,6 @@
-from .app_helpers import * # NOQA
+import warnings
-# TODO deprecate
-# import warnings
-# from .util.deprecation import DeprecatedWarning
+from .app_helpers import * # NOQA
+from .util.deprecation import DeprecatedWarning
-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
| {"golden_diff": "diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py\n--- a/falcon/api_helpers.py\n+++ b/falcon/api_helpers.py\n@@ -1,7 +1,6 @@\n-from .app_helpers import * # NOQA\n+import warnings\n \n-# TODO deprecate\n-# import warnings\n-# from .util.deprecation import DeprecatedWarning\n+from .app_helpers import * # NOQA\n+from .util.deprecation import DeprecatedWarning\n \n-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "issue": "Deprecate falcon.api_helpers\nSee https://github.com/falconry/falcon/issues/1902.\r\n\r\nStarting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.\n", "before_files": [{"content": "from .app_helpers import * # NOQA\n\n# TODO deprecate\n# import warnings\n# from .util.deprecation import DeprecatedWarning\n\n# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "path": "falcon/api_helpers.py"}]} | 655 | 138 |
gh_patches_debug_22932 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type annontations
please add type annotations here
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from logging import getLogger
17 from threading import Lock
18 from typing import TYPE_CHECKING, Iterable
19
20 from opentelemetry.sdk._metrics.aggregation import (
21 _convert_aggregation_temporality,
22 )
23 from opentelemetry.sdk._metrics.measurement import Measurement
24 from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
25 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
26 from opentelemetry.sdk._metrics.view import View
27
28 if TYPE_CHECKING:
29 from opentelemetry.sdk._metrics.instrument import _Instrument
30
31 _logger = getLogger(__name__)
32
33
34 class _ViewInstrumentMatch:
35 def __init__(
36 self,
37 view: View,
38 instrument: "_Instrument",
39 sdk_config: SdkConfiguration,
40 ):
41 self._view = view
42 self._instrument = instrument
43 self._sdk_config = sdk_config
44 self._attributes_aggregation = {}
45 self._attributes_previous_point = {}
46 self._lock = Lock()
47
48 # pylint: disable=protected-access
49 def consume_measurement(self, measurement: Measurement) -> None:
50
51 if self._view._attribute_keys is not None:
52
53 attributes = {}
54
55 for key, value in (measurement.attributes or {}).items():
56 if key in self._view._attribute_keys:
57 attributes[key] = value
58 elif measurement.attributes is not None:
59 attributes = measurement.attributes
60 else:
61 attributes = {}
62
63 attributes = frozenset(attributes.items())
64
65 if attributes not in self._attributes_aggregation:
66 with self._lock:
67 if attributes not in self._attributes_aggregation:
68 if self._view._aggregation:
69 aggregation = (
70 self._view._aggregation._create_aggregation(
71 self._instrument
72 )
73 )
74 else:
75 aggregation = self._instrument._default_aggregation
76 self._attributes_aggregation[attributes] = aggregation
77
78 self._attributes_aggregation[attributes].aggregate(measurement)
79
80 def collect(self, temporality: int) -> Iterable[Metric]:
81
82 with self._lock:
83 for (
84 attributes,
85 aggregation,
86 ) in self._attributes_aggregation.items():
87
88 previous_point = self._attributes_previous_point.get(
89 attributes
90 )
91
92 current_point = aggregation.collect()
93
94 # pylint: disable=assignment-from-none
95 self._attributes_previous_point[
96 attributes
97 ] = _convert_aggregation_temporality(
98 previous_point,
99 current_point,
100 AggregationTemporality.CUMULATIVE,
101 )
102
103 if current_point is not None:
104
105 yield Metric(
106 attributes=dict(attributes),
107 description=(
108 self._view._description
109 or self._instrument.description
110 ),
111 instrumentation_info=self._instrument.instrumentation_info,
112 name=self._view._name or self._instrument.name,
113 resource=self._sdk_config.resource,
114 unit=self._instrument.unit,
115 point=_convert_aggregation_temporality(
116 previous_point,
117 current_point,
118 temporality,
119 ),
120 )
121
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
@@ -15,10 +15,12 @@
from logging import getLogger
from threading import Lock
-from typing import TYPE_CHECKING, Iterable
+from typing import TYPE_CHECKING, Dict, Iterable
from opentelemetry.sdk._metrics.aggregation import (
+ _Aggregation,
_convert_aggregation_temporality,
+ _PointVarT,
)
from opentelemetry.sdk._metrics.measurement import Measurement
from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
@@ -41,8 +43,8 @@
self._view = view
self._instrument = instrument
self._sdk_config = sdk_config
- self._attributes_aggregation = {}
- self._attributes_previous_point = {}
+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}
self._lock = Lock()
# pylint: disable=protected-access
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n@@ -15,10 +15,12 @@\n \n from logging import getLogger\n from threading import Lock\n-from typing import TYPE_CHECKING, Iterable\n+from typing import TYPE_CHECKING, Dict, Iterable\n \n from opentelemetry.sdk._metrics.aggregation import (\n+ _Aggregation,\n _convert_aggregation_temporality,\n+ _PointVarT,\n )\n from opentelemetry.sdk._metrics.measurement import Measurement\n from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\n@@ -41,8 +43,8 @@\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n- self._attributes_aggregation = {}\n- self._attributes_previous_point = {}\n+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n \n # pylint: disable=protected-access\n", "issue": "Add type annontations\nplease add type annotations here\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _convert_aggregation_temporality,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation = {}\n self._attributes_previous_point = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if self._view._aggregation:\n aggregation = (\n 
self._view._aggregation._create_aggregation(\n self._instrument\n )\n )\n else:\n aggregation = self._instrument._default_aggregation\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py"}]} | 1,630 | 301 |
gh_patches_debug_3773 | rasdani/github-patches | git_diff | scikit-hep__pyhf-860 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Python 3.8 to CI
# Description
The branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504) this won't be able to happen until around December 2019.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=3.2.0', # for utils
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -87,6 +87,7 @@
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
],
package_dir={'': 'src'},
packages=find_packages(where='src'),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,6 +87,7 @@\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n", "issue": "Add Python 3.8 to CI\n# Description\r\n\r\nThe branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504) this won't be able to happen until around December 2019.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: 
{'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,720 | 98 |
gh_patches_debug_34144 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-3017 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Doesn't catch invalid `rate(1 hours)`
*cfn-lint version: (`cfn-lint --version`)*
0.44.7
*Description of issue.*
cfn-lint doesn't recognize that this ScheduledExpression is invalid (should be `rate(1 hour)`)
```yaml
ExampleRule:
Type: AWS::Events::Rule
Properties:
Description: desc
Name: name
ScheduleExpression: rate(1 hours)
State: ENABLED
```
But when building the cloudformation, I get the following error:
```
Parameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)
```
I saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket
</issue>
<code>
[start of src/cfnlint/rules/resources/events/RuleScheduleExpression.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule, RuleMatch
6
7
8 class RuleScheduleExpression(CloudFormationLintRule):
9 """Validate AWS Events Schedule expression format"""
10
11 id = "E3027"
12 shortdesc = "Validate AWS Event ScheduleExpression format"
13 description = "Validate the formation of the AWS::Event ScheduleExpression"
14 source_url = "https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html"
15 tags = ["resources", "events"]
16
17 def initialize(self, cfn):
18 """Initialize the rule"""
19 self.resource_property_types = ["AWS::Events::Rule"]
20
21 def check_rate(self, value, path):
22 """Check Rate configuration"""
23 matches = []
24 # Extract the expression from rate(XXX)
25 rate_expression = value[value.find("(") + 1 : value.find(")")]
26
27 if not rate_expression:
28 matches.append(
29 RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
30 )
31 else:
32 # Rate format: rate(Value Unit)
33 items = rate_expression.split(" ")
34
35 if len(items) != 2:
36 message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
37 matches.append(RuleMatch(path, message.format(len(items))))
38 else:
39 # Check the Value
40 if not items[0].isdigit():
41 message = "Rate Value ({}) should be of type Integer."
42 extra_args = {
43 "actual_type": type(items[0]).__name__,
44 "expected_type": int.__name__,
45 }
46 matches.append(
47 RuleMatch(path, message.format(items[0]), **extra_args)
48 )
49
50 return matches
51
52 def check_cron(self, value, path):
53 """Check Cron configuration"""
54 matches = []
55 # Extract the expression from cron(XXX)
56 cron_expression = value[value.find("(") + 1 : value.find(")")]
57
58 if not cron_expression:
59 matches.append(
60 RuleMatch(path, "Cron value of ScheduleExpression cannot be empty")
61 )
62 else:
63 # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)
64 items = cron_expression.split(" ")
65
66 if len(items) != 6:
67 message = "Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements"
68 matches.append(RuleMatch(path, message.format(len(items))))
69 return matches
70
71 _, _, day_of_month, _, day_of_week, _ = cron_expression.split(" ")
72 if day_of_month != "?" and day_of_week != "?":
73 matches.append(
74 RuleMatch(
75 path,
76 "Don't specify the Day-of-month and Day-of-week fields in the same cron expression",
77 )
78 )
79
80 return matches
81
82 def check_value(self, value, path):
83 """Count ScheduledExpression value"""
84 matches = []
85
86 # Value is either "cron()" or "rate()"
87 if value.startswith("rate(") and value.endswith(")"):
88 matches.extend(self.check_rate(value, path))
89 elif value.startswith("cron(") and value.endswith(")"):
90 matches.extend(self.check_cron(value, path))
91 else:
92 message = "Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()"
93 matches.append(RuleMatch(path, message.format(value)))
94
95 return matches
96
97 def match_resource_properties(self, properties, _, path, cfn):
98 """Check CloudFormation Properties"""
99 matches = []
100
101 matches.extend(
102 cfn.check_value(
103 obj=properties,
104 key="ScheduleExpression",
105 path=path[:],
106 check_value=self.check_value,
107 )
108 )
109
110 return matches
111
[end of src/cfnlint/rules/resources/events/RuleScheduleExpression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
@@ -25,29 +25,43 @@
rate_expression = value[value.find("(") + 1 : value.find(")")]
if not rate_expression:
- matches.append(
- RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
- )
- else:
- # Rate format: rate(Value Unit)
- items = rate_expression.split(" ")
-
- if len(items) != 2:
- message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
- matches.append(RuleMatch(path, message.format(len(items))))
- else:
- # Check the Value
- if not items[0].isdigit():
- message = "Rate Value ({}) should be of type Integer."
- extra_args = {
- "actual_type": type(items[0]).__name__,
- "expected_type": int.__name__,
- }
- matches.append(
- RuleMatch(path, message.format(items[0]), **extra_args)
- )
+ return [RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")]
+
+ # Rate format: rate(Value Unit)
+ items = rate_expression.split(" ")
+
+ if len(items) != 2:
+ message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
+ matches.append(RuleMatch(path, message.format(len(items))))
+ return [RuleMatch(path, message.format(len(items)))]
+
+ # Check the Value
+ if not items[0].isdigit():
+ message = "Rate Value ({}) should be of type Integer."
+ extra_args = {
+ "actual_type": type(items[0]).__name__,
+ "expected_type": int.__name__,
+ }
+ return [RuleMatch(path, message.format(items[0]), **extra_args)]
+
+ if float(items[0]) <= 0:
+ return [
+ RuleMatch(path, f"Rate Value {items[0]!r} should be greater than 0.")
+ ]
+
+ if float(items[0]) <= 1:
+ valid_periods = ["minute", "hour", "day"]
+ elif float(items[0]) > 1:
+ valid_periods = ["minutes", "hours", "days"]
+ # Check the Unit
+ if items[1] not in valid_periods:
+ return [
+ RuleMatch(
+ path, f"Rate Unit {items[1]!r} should be one of {valid_periods!r}."
+ )
+ ]
- return matches
+ return []
def check_cron(self, value, path):
"""Check Cron configuration"""
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n@@ -25,29 +25,43 @@\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n \n if not rate_expression:\n- matches.append(\n- RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n- )\n- else:\n- # Rate format: rate(Value Unit)\n- items = rate_expression.split(\" \")\n-\n- if len(items) != 2:\n- message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n- matches.append(RuleMatch(path, message.format(len(items))))\n- else:\n- # Check the Value\n- if not items[0].isdigit():\n- message = \"Rate Value ({}) should be of type Integer.\"\n- extra_args = {\n- \"actual_type\": type(items[0]).__name__,\n- \"expected_type\": int.__name__,\n- }\n- matches.append(\n- RuleMatch(path, message.format(items[0]), **extra_args)\n- )\n+ return [RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")]\n+\n+ # Rate format: rate(Value Unit)\n+ items = rate_expression.split(\" \")\n+\n+ if len(items) != 2:\n+ message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n+ matches.append(RuleMatch(path, message.format(len(items))))\n+ return [RuleMatch(path, message.format(len(items)))]\n+\n+ # Check the Value\n+ if not items[0].isdigit():\n+ message = \"Rate Value ({}) should be of type Integer.\"\n+ extra_args = {\n+ \"actual_type\": type(items[0]).__name__,\n+ \"expected_type\": int.__name__,\n+ }\n+ return [RuleMatch(path, message.format(items[0]), **extra_args)]\n+\n+ if float(items[0]) <= 0:\n+ return [\n+ RuleMatch(path, f\"Rate Value {items[0]!r} should be greater than 0.\")\n+ ]\n+\n+ if float(items[0]) <= 1:\n+ valid_periods = [\"minute\", \"hour\", \"day\"]\n+ elif float(items[0]) > 1:\n+ valid_periods = [\"minutes\", \"hours\", \"days\"]\n+ # Check the Unit\n+ if items[1] not in valid_periods:\n+ return [\n+ RuleMatch(\n+ path, f\"Rate Unit {items[1]!r} should be one of {valid_periods!r}.\"\n+ )\n+ ]\n \n- return matches\n+ return []\n \n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n", "issue": "Doesn't catch invalid `rate(1 hours)`\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n0.44.7\r\n\r\n*Description of issue.*\r\n\r\ncfn-lint doesn't recognize that this ScheduledExpression is invalid (should be `rate(1 hour)`)\r\n```yaml\r\n ExampleRule:\r\n Type: AWS::Events::Rule\r\n Properties:\r\n Description: desc\r\n Name: name\r\n ScheduleExpression: rate(1 hours)\r\n State: ENABLED\r\n```\r\n\r\nBut when building the cloudformation, I get the following error:\r\n\r\n```\r\nParameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)\r\n```\r\n\r\nI saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n\n id = \"E3027\"\n shortdesc = \"Validate AWS Event ScheduleExpression format\"\n description = \"Validate the formation of the AWS::Event ScheduleExpression\"\n source_url = \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\"\n tags = [\"resources\", \"events\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = [\"AWS::Events::Rule\"]\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not rate_expression:\n matches.append(\n RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: rate(Value Unit)\n items = rate_expression.split(\" \")\n\n if len(items) != 2:\n message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the Value\n if not items[0].isdigit():\n message = \"Rate Value ({}) should be of type Integer.\"\n extra_args = {\n \"actual_type\": type(items[0]).__name__,\n \"expected_type\": int.__name__,\n }\n matches.append(\n RuleMatch(path, message.format(items[0]), **extra_args)\n )\n\n return matches\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not cron_expression:\n matches.append(\n RuleMatch(path, \"Cron value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(\" \")\n\n if len(items) != 6:\n message = \"Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return matches\n\n _, _, day_of_month, _, day_of_week, _ = cron_expression.split(\" \")\n if day_of_month != \"?\" and day_of_week != \"?\":\n matches.append(\n RuleMatch(\n path,\n \"Don't specify the Day-of-month and Day-of-week fields in the same cron expression\",\n )\n )\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith(\"rate(\") and value.endswith(\")\"):\n matches.extend(self.check_rate(value, path))\n elif value.startswith(\"cron(\") and value.endswith(\")\"):\n matches.extend(self.check_cron(value, path))\n else:\n message = \"Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()\"\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"ScheduleExpression\",\n path=path[:],\n check_value=self.check_value,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}]} | 1,789 | 655 |
gh_patches_debug_10525 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-1924 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py
In 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.
</issue>
<code>
[start of easybuild/easyblocks/generic/versionindependentpythonpackage.py]
1 ##
2 # Copyright 2013-2020 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.
27
28 Python installs libraries by defailt in site-packages/python-xxx/
29 But packages that are not dependend on the python version can be installed in a different prefix, e.g. lib
30 as long as we add this folder to the pythonpath.
31
32 @author: Kenneth Hoste, Jens Timmerman (Ghent University)
33 """
34 import os
35 import re
36
37 import easybuild.tools.environment as env
38 from easybuild.easyblocks.generic.pythonpackage import PythonPackage
39 from easybuild.tools.build_log import EasyBuildError
40 from easybuild.tools.run import run_cmd
41
42
43 class VersionIndependentPythonPackage(PythonPackage):
44 """Support for building/installing python packages without requiring a specific python package."""
45
46 def build_step(self):
47 """No build procedure."""
48 pass
49
50 def prepare_step(self, *args, **kwargs):
51 """Set pylibdir"""
52 self.pylibdir = 'lib'
53 super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
54
55 def install_step(self):
56 """Custom install procedure to skip selection of python package versions."""
57 full_pylibdir = os.path.join(self.installdir, self.pylibdir)
58
59 env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))
60
61 try:
62 os.mkdir(full_pylibdir)
63 except OSError as err:
64 # this will raise an error and not return
65 raise EasyBuildError("Failed to install: %s", err)
66
67 if self.use_setup_py:
68 extra_installopts = [
69 '--install-lib=%s' % full_pylibdir,
70 '--single-version-externally-managed',
71 '--record %s' % os.path.join(self.builddir, 'record'),
72 '--no-compile',
73 ]
74 self.cfg.update('installopts', ' '.join(extra_installopts))
75 else:
76 # using easy_install or pip always results in installation that is specific to Python version
77 eb_name = self.__class__.__name__
78 raise EasyBuildError("%s easyblock is not compatible with using easy_install or pip", eb_name)
79
80 cmd = self.compose_install_command(self.installdir)
81 run_cmd(cmd, log_all=True, simple=True, log_output=True)
82
83 # setuptools stubbornly replaces the shebang line in scripts with
84 # the full path to the Python interpreter used to install;
85 # we change it (back) to '#!/usr/bin/env python' here
86 shebang_re = re.compile("^#!/.*python")
87 bindir = os.path.join(self.installdir, 'bin')
88 if os.path.exists(bindir):
89 for script in os.listdir(bindir):
90 script = os.path.join(bindir, script)
91 if os.path.isfile(script):
92 try:
93 txt = open(script, 'r').read()
94 if shebang_re.search(txt):
95 new_shebang = "#!/usr/bin/env python"
96 self.log.debug("Patching shebang header line in %s to '%s'" % (script, new_shebang))
97 txt = shebang_re.sub(new_shebang, txt)
98 open(script, 'w').write(txt)
99 except IOError as err:
100 raise EasyBuildError("Failed to patch shebang header line in %s: %s", script, err)
101
[end of easybuild/easyblocks/generic/versionindependentpythonpackage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py
+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
@@ -47,10 +47,11 @@
"""No build procedure."""
pass
- def prepare_step(self, *args, **kwargs):
- """Set pylibdir"""
+ def set_pylibdirs(self):
+ """Set pylibdir."""
+ super(VersionIndependentPythonPackage, self).set_pylibdirs()
self.pylibdir = 'lib'
- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
+ self.all_pylibdirs = ['lib']
def install_step(self):
"""Custom install procedure to skip selection of python package versions."""
| {"golden_diff": "diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n@@ -47,10 +47,11 @@\n \"\"\"No build procedure.\"\"\"\n pass\n \n- def prepare_step(self, *args, **kwargs):\n- \"\"\"Set pylibdir\"\"\"\n+ def set_pylibdirs(self):\n+ \"\"\"Set pylibdir.\"\"\"\n+ super(VersionIndependentPythonPackage, self).set_pylibdirs()\n self.pylibdir = 'lib'\n- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n+ self.all_pylibdirs = ['lib']\n \n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n", "issue": "pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py\nIn 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.\n", "before_files": [{"content": "##\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.\n\nPython installs libraries by defailt in site-packages/python-xxx/\nBut packages that are not dependend on the python version can be installed in a different prefix, e.g. 
lib\nas long as we add this folder to the pythonpath.\n\n@author: Kenneth Hoste, Jens Timmerman (Ghent University)\n\"\"\"\nimport os\nimport re\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.run import run_cmd\n\n\nclass VersionIndependentPythonPackage(PythonPackage):\n \"\"\"Support for building/installing python packages without requiring a specific python package.\"\"\"\n\n def build_step(self):\n \"\"\"No build procedure.\"\"\"\n pass\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Set pylibdir\"\"\"\n self.pylibdir = 'lib'\n super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n\n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n full_pylibdir = os.path.join(self.installdir, self.pylibdir)\n\n env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))\n\n try:\n os.mkdir(full_pylibdir)\n except OSError as err:\n # this will raise an error and not return\n raise EasyBuildError(\"Failed to install: %s\", err)\n\n if self.use_setup_py:\n extra_installopts = [\n '--install-lib=%s' % full_pylibdir,\n '--single-version-externally-managed',\n '--record %s' % os.path.join(self.builddir, 'record'),\n '--no-compile',\n ]\n self.cfg.update('installopts', ' '.join(extra_installopts))\n else:\n # using easy_install or pip always results in installation that is specific to Python version\n eb_name = self.__class__.__name__\n raise EasyBuildError(\"%s easyblock is not compatible with using easy_install or pip\", eb_name)\n\n cmd = self.compose_install_command(self.installdir)\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n # setuptools stubbornly replaces the shebang line in scripts with\n # the full path to the Python interpreter used to install;\n # we change it (back) to '#!/usr/bin/env python' here\n shebang_re = re.compile(\"^#!/.*python\")\n bindir = os.path.join(self.installdir, 'bin')\n if os.path.exists(bindir):\n for script in os.listdir(bindir):\n script = os.path.join(bindir, script)\n if os.path.isfile(script):\n try:\n txt = open(script, 'r').read()\n if shebang_re.search(txt):\n new_shebang = \"#!/usr/bin/env python\"\n self.log.debug(\"Patching shebang header line in %s to '%s'\" % (script, new_shebang))\n txt = shebang_re.sub(new_shebang, txt)\n open(script, 'w').write(txt)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch shebang header line in %s: %s\", script, err)\n", "path": "easybuild/easyblocks/generic/versionindependentpythonpackage.py"}]} | 1,916 | 205 |
gh_patches_debug_22476 | rasdani/github-patches | git_diff | carpentries__amy-714 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tokenize person lookup to work with "name last_name"
`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input – because it only looks up `personal` or `family` or `email`…
</issue>
<code>
[start of workshops/lookups.py]
1 from django.contrib.auth.models import Group
2 from django.db.models import Q
3
4 from selectable.base import ModelLookup
5 from selectable.registry import registry
6 from selectable.decorators import login_required
7
8 from workshops import models
9
10
11 @login_required
12 class EventLookup(ModelLookup):
13 model = models.Event
14 search_fields = ('slug__icontains', )
15
16
17 @login_required
18 class HostLookup(ModelLookup):
19 model = models.Host
20 search_fields = (
21 'domain__icontains',
22 'fullname__icontains'
23 )
24
25
26 @login_required
27 class PersonLookup(ModelLookup):
28 model = models.Person
29 search_fields = (
30 'personal__icontains',
31 'family__icontains',
32 'email__icontains',
33 'username__icontains'
34 )
35
36
37 @login_required
38 class AdminLookup(ModelLookup):
39 """The same as PersonLookup, but allows only to select administrators.
40
41 Administrator is anyone with superuser power or in "administrators" group.
42 """
43 model = models.Person
44 search_fields = (
45 'personal__icontains',
46 'family__icontains',
47 'email__icontains',
48 'username__icontains'
49 )
50
51 def get_query(self, request, term):
52 results = super().get_query(request, term)
53 admin_group = Group.objects.get(name='administrators')
54 results = results.filter(
55 Q(is_superuser=True) | Q(groups__in=[admin_group])
56 )
57 return results
58
59
60 @login_required
61 class AirportLookup(ModelLookup):
62 model = models.Airport
63 search_fields = (
64 'iata__icontains',
65 'fullname__icontains'
66 )
67
68
69 registry.register(EventLookup)
70 registry.register(HostLookup)
71 registry.register(PersonLookup)
72 registry.register(AdminLookup)
73 registry.register(AirportLookup)
74
[end of workshops/lookups.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/workshops/lookups.py b/workshops/lookups.py
--- a/workshops/lookups.py
+++ b/workshops/lookups.py
@@ -1,3 +1,7 @@
+from functools import reduce
+import operator
+import re
+
from django.contrib.auth.models import Group
from django.db.models import Q
@@ -33,6 +37,32 @@
'username__icontains'
)
+ def get_query(self, request, term):
+ """Override this method to allow for additional lookup method: """
+ # original code from selectable.base.ModelLookup.get_query:
+ qs = self.get_queryset()
+ if term:
+ search_filters = []
+ if self.search_fields:
+ for field in self.search_fields:
+ search_filters.append(Q(**{field: term}))
+
+ # tokenizing part
+ tokens = re.split('\s+', term)
+ if len(tokens) == 2:
+ name1, name2 = tokens
+ complex_q = (
+ Q(personal__icontains=name1) & Q(family__icontains=name2)
+ ) | (
+ Q(personal__icontains=name2) & Q(family__icontains=name1)
+ )
+ search_filters.append(complex_q)
+
+ # this is brilliant: it applies OR to all search filters
+ qs = qs.filter(reduce(operator.or_, search_filters))
+
+ return qs
+
@login_required
class AdminLookup(ModelLookup):
| {"golden_diff": "diff --git a/workshops/lookups.py b/workshops/lookups.py\n--- a/workshops/lookups.py\n+++ b/workshops/lookups.py\n@@ -1,3 +1,7 @@\n+from functools import reduce\n+import operator\n+import re\n+\n from django.contrib.auth.models import Group\n from django.db.models import Q\n \n@@ -33,6 +37,32 @@\n 'username__icontains'\n )\n \n+ def get_query(self, request, term):\n+ \"\"\"Override this method to allow for additional lookup method: \"\"\"\n+ # original code from selectable.base.ModelLookup.get_query:\n+ qs = self.get_queryset()\n+ if term:\n+ search_filters = []\n+ if self.search_fields:\n+ for field in self.search_fields:\n+ search_filters.append(Q(**{field: term}))\n+\n+ # tokenizing part\n+ tokens = re.split('\\s+', term)\n+ if len(tokens) == 2:\n+ name1, name2 = tokens\n+ complex_q = (\n+ Q(personal__icontains=name1) & Q(family__icontains=name2)\n+ ) | (\n+ Q(personal__icontains=name2) & Q(family__icontains=name1)\n+ )\n+ search_filters.append(complex_q)\n+\n+ # this is brilliant: it applies OR to all search filters\n+ qs = qs.filter(reduce(operator.or_, search_filters))\n+\n+ return qs\n+\n \n @login_required\n class AdminLookup(ModelLookup):\n", "issue": "Tokenize person lookup to work with \"name last_name\"\n`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input \u2013 because it only looks up `personal` or `family` or `email`\u2026\n\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', )\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py"}]} | 1,103 | 332 |
gh_patches_debug_8564 | rasdani/github-patches | git_diff | comfyanonymous__ComfyUI-2859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Perp-Neg implementation is wrong, parallel component not ignored
https://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40
The Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).
When positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).
Positive: "forest"
Negative: ""

Positive: "forest"
Negative: "forest"

I'll submit a PR in a bit.
</issue>
<code>
[start of comfy_extras/nodes_perpneg.py]
1 import torch
2 import comfy.model_management
3 import comfy.sample
4 import comfy.samplers
5 import comfy.utils
6
7
8 class PerpNeg:
9 @classmethod
10 def INPUT_TYPES(s):
11 return {"required": {"model": ("MODEL", ),
12 "empty_conditioning": ("CONDITIONING", ),
13 "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
14 }}
15 RETURN_TYPES = ("MODEL",)
16 FUNCTION = "patch"
17
18 CATEGORY = "_for_testing"
19
20 def patch(self, model, empty_conditioning, neg_scale):
21 m = model.clone()
22 nocond = comfy.sample.convert_cond(empty_conditioning)
23
24 def cfg_function(args):
25 model = args["model"]
26 noise_pred_pos = args["cond_denoised"]
27 noise_pred_neg = args["uncond_denoised"]
28 cond_scale = args["cond_scale"]
29 x = args["input"]
30 sigma = args["sigma"]
31 model_options = args["model_options"]
32 nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")
33
34 (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)
35
36 pos = noise_pred_pos - noise_pred_nocond
37 neg = noise_pred_neg - noise_pred_nocond
38 perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
39 perp_neg = perp * neg_scale
40 cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
41 cfg_result = x - cfg_result
42 return cfg_result
43
44 m.set_model_sampler_cfg_function(cfg_function)
45
46 return (m, )
47
48
49 NODE_CLASS_MAPPINGS = {
50 "PerpNeg": PerpNeg,
51 }
52
53 NODE_DISPLAY_NAME_MAPPINGS = {
54 "PerpNeg": "Perp-Neg",
55 }
56
[end of comfy_extras/nodes_perpneg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -35,7 +35,7 @@
pos = noise_pred_pos - noise_pred_nocond
neg = noise_pred_neg - noise_pred_nocond
- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos
perp_neg = perp * neg_scale
cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
cfg_result = x - cfg_result
| {"golden_diff": "diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py\n--- a/comfy_extras/nodes_perpneg.py\n+++ b/comfy_extras/nodes_perpneg.py\n@@ -35,7 +35,7 @@\n \n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n", "issue": "Perp-Neg implementation is wrong, parallel component not ignored\nhttps://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40\r\n\r\nThe Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).\r\nWhen positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).\r\n\r\nPositive: \"forest\"\r\nNegative: \"\"\r\n\r\n\r\nPositive: \"forest\"\r\nNegative: \"forest\"\r\n\r\n\r\nI'll submit a PR in a bit.\n", "before_files": [{"content": "import torch\nimport comfy.model_management\nimport comfy.sample\nimport comfy.samplers\nimport comfy.utils\n\n\nclass PerpNeg:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": {\"model\": (\"MODEL\", ),\n \"empty_conditioning\": (\"CONDITIONING\", ),\n \"neg_scale\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 100.0}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION = \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, empty_conditioning, neg_scale):\n m = model.clone()\n nocond = comfy.sample.convert_cond(empty_conditioning)\n\n def cfg_function(args):\n model = args[\"model\"]\n noise_pred_pos = args[\"cond_denoised\"]\n noise_pred_neg = args[\"uncond_denoised\"]\n cond_scale = args[\"cond_scale\"]\n x = args[\"input\"]\n sigma = args[\"sigma\"]\n model_options = args[\"model_options\"]\n nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, \"negative\")\n\n (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)\n\n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n return cfg_result\n\n m.set_model_sampler_cfg_function(cfg_function)\n\n return (m, )\n\n\nNODE_CLASS_MAPPINGS = {\n \"PerpNeg\": PerpNeg,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n \"PerpNeg\": \"Perp-Neg\",\n}\n", "path": "comfy_extras/nodes_perpneg.py"}]} | 1,407 | 180 |
gh_patches_debug_6676 | rasdani/github-patches | git_diff | streamlink__streamlink-1513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fox.com.tr not work with Streamlink
## **Checklist**
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
## **Description**
i cant see anything at fox.com.tr
i have test it with this links but i became black screen
## **Reproduction steps / Explicit stream URLs to test**
https://www.fox.com.tr/canli-yayin
</issue>
<code>
[start of src/streamlink/plugins/foxtr.py]
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class FoxTR(Plugin):
11 """
12 Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
13 """
14 url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
15 playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
16
17 @classmethod
18 def can_handle_url(cls, url):
19 return cls.url_re.match(url) is not None
20
21 def _get_streams(self):
22 res = http.get(self.url)
23 match = self.playervars_re.search(res.text)
24 if match:
25 stream_url = match.group(1)
26 return HLSStream.parse_variant_playlist(self.session, stream_url)
27
28
29 __plugin__ = FoxTR
30
[end of src/streamlink/plugins/foxtr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py
--- a/src/streamlink/plugins/foxtr.py
+++ b/src/streamlink/plugins/foxtr.py
@@ -12,7 +12,7 @@
Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
"""
url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
- playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
+ playervars_re = re.compile(r"source\s*:\s*\[\s*\{\s*videoSrc\s*:\s*'(.*?)'", re.DOTALL)
@classmethod
def can_handle_url(cls, url):
| {"golden_diff": "diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py\n--- a/src/streamlink/plugins/foxtr.py\n+++ b/src/streamlink/plugins/foxtr.py\n@@ -12,7 +12,7 @@\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n- playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n+ playervars_re = re.compile(r\"source\\s*:\\s*\\[\\s*\\{\\s*videoSrc\\s*:\\s*'(.*?)'\", re.DOTALL)\n \n @classmethod\n def can_handle_url(cls, url):\n", "issue": "Fox.com.tr not work with Streamlink\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at fox.com.tr \r\n i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\nhttps://www.fox.com.tr/canli-yayin\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass FoxTR(Plugin):\n \"\"\"\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n match = self.playervars_re.search(res.text)\n if match:\n stream_url = match.group(1)\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = FoxTR\n", "path": "src/streamlink/plugins/foxtr.py"}]} | 931 | 192 |
gh_patches_debug_2182 | rasdani/github-patches | git_diff | modin-project__modin-4769 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test_join_sort.py in CI failed by timeout with new Dask release - 2022.2.0
Error: https://github.com/modin-project/modin/runs/5195622251?check_suite_focus=true
Dask release - https://github.com/dask/dask/releases/tag/2022.02.0
Fastest option here - pin `dask<2022.2.0`, but it also requires an investigation into the cause.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import versioneer
3 import sys
4
5 PANDAS_VERSION = "1.4.3" if sys.version_info >= (3, 8) else "1.1.5"
6
7 with open("README.md", "r", encoding="utf-8") as fh:
8 long_description = fh.read()
9
10 dask_deps = ["dask>=2.22.0,<2022.2.0", "distributed>=2.22.0,<2022.2.0"]
11 if sys.version_info < (3, 8):
12 dask_deps.append("pickle5")
13
14 ray_deps = [
15 "ray[default]>=1.4.0",
16 "pyarrow>=4.0.1",
17 "redis>=3.5.0,<4.0.0",
18 ]
19 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
20 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
21 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
22 all_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps
23
24 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
25 # This file provides the "import pandas before Ray init" feature if specific
26 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
27 cmdclass = versioneer.get_cmdclass()
28 extra_files = ["modin-autoimport-pandas.pth"]
29
30
31 class AddPthFileBuild(cmdclass["build_py"]):
32 def _get_data_files(self):
33 return (super()._get_data_files() or []) + [
34 (".", ".", self.build_lib, extra_files)
35 ]
36
37
38 class AddPthFileSDist(cmdclass["sdist"]):
39 def make_distribution(self):
40 self.filelist.extend(extra_files)
41 return super().make_distribution()
42
43
44 cmdclass["build_py"] = AddPthFileBuild
45 cmdclass["sdist"] = AddPthFileSDist
46
47 setup(
48 name="modin",
49 version=versioneer.get_version(),
50 cmdclass=cmdclass,
51 description="Modin: Make your pandas code run faster by changing one line of code.",
52 packages=find_packages(exclude=["scripts", "scripts.*"]),
53 include_package_data=True,
54 license="Apache 2",
55 url="https://github.com/modin-project/modin",
56 long_description=long_description,
57 long_description_content_type="text/markdown",
58 install_requires=[f"pandas=={PANDAS_VERSION}", "packaging", "numpy>=1.18.5", "fsspec", "psutil"],
59 extras_require={
60 # can be installed by pip install modin[dask]
61 "dask": dask_deps,
62 "ray": ray_deps,
63 "remote": remote_deps,
64 "spreadsheet": spreadsheet_deps,
65 "sql": sql_deps,
66 "all": all_deps,
67 },
68 python_requires=">=3.6",
69 )
70
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
-dask_deps = ["dask>=2.22.0,<2022.2.0", "distributed>=2.22.0,<2022.2.0"]
+dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
if sys.version_info < (3, 8):
dask_deps.append("pickle5")
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n \n-dask_deps = [\"dask>=2.22.0,<2022.2.0\", \"distributed>=2.22.0,<2022.2.0\"]\n+dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n if sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n", "issue": "test_join_sort.py in CI failed by timeout with new Dask release - 2022.2.0\nError: https://github.com/modin-project/modin/runs/5195622251?check_suite_focus=true\r\n\r\nDask release - https://github.com/dask/dask/releases/tag/2022.02.0\r\n\r\nFastest option here - pin `dask<2022.2.0`, but it also requires an investigation into the cause.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport sys\n\nPANDAS_VERSION = \"1.4.3\" if sys.version_info >= (3, 8) else \"1.1.5\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0,<2022.2.0\", \"distributed>=2.22.0,<2022.2.0\"]\nif sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n\nray_deps = [\n \"ray[default]>=1.4.0\",\n \"pyarrow>=4.0.1\",\n \"redis>=3.5.0,<4.0.0\",\n]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[f\"pandas=={PANDAS_VERSION}\", \"packaging\", \"numpy>=1.18.5\", \"fsspec\", \"psutil\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}]} | 1,438 | 145 |
gh_patches_debug_33208 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-3135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transition to GA: costmanagement
Command module `costmanagement` has been released for a long time and is using stable sdk version `2019-11-01`.
Please check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.
</issue>
<code>
[start of src/costmanagement/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11
12 # TODO: Confirm this is the right version number you want and it matches your
13 # HISTORY.rst entry.
14 VERSION = '0.1.0'
15
16 # The full list of classifiers is available at
17 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
18 CLASSIFIERS = [
19 'Development Status :: 4 - Beta',
20 'Intended Audience :: Developers',
21 'Intended Audience :: System Administrators',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.6',
25 'Programming Language :: Python :: 3.7',
26 'Programming Language :: Python :: 3.8',
27 'License :: OSI Approved :: MIT License',
28 ]
29
30 # TODO: Add any additional SDK dependencies here
31 DEPENDENCIES = []
32
33 with open('README.md', 'r', encoding='utf-8') as f:
34 README = f.read()
35 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
36 HISTORY = f.read()
37
38 setup(
39 name='costmanagement',
40 version=VERSION,
41 description='Microsoft Azure Command-Line Tools CostManagementClient Extension',
42 # TODO: Update author and email, if applicable
43 author='Microsoft Corporation',
44 author_email='[email protected]',
45 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/costmanagement',
46 long_description=README + '\n\n' + HISTORY,
47 license='MIT',
48 classifiers=CLASSIFIERS,
49 packages=find_packages(),
50 install_requires=DEPENDENCIES,
51 package_data={'azext_costmanagement': ['azext_metadata.json']},
52 )
53
[end of src/costmanagement/setup.py]
[start of src/costmanagement/azext_costmanagement/manual/commands.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from azure.cli.core.commands import CliCommandType
7
8
9 def load_command_table(self, _):
10
11 from azext_costmanagement.generated._client_factory import cf_query
12 costmanagement_query = CliCommandType(
13 operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'
14 '_query_operations#QueryOperations.{}',
15 client_factory=cf_query)
16 with self.command_group('costmanagement', costmanagement_query,
17 client_factory=cf_query, is_experimental=True) as g:
18 g.custom_command('query', 'costmanagement_query')
19
20 from azext_costmanagement.generated._client_factory import cf_export
21 costmanagement_export = CliCommandType(
22 operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'
23 'ns.{}',
24 client_factory=cf_export)
25 with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,
26 is_experimental=True) as g:
27 g.custom_command('list', 'costmanagement_export_list')
28 g.custom_show_command('show', 'costmanagement_export_show')
29 g.custom_command('create', 'costmanagement_export_create')
30 g.custom_command('update', 'costmanagement_export_update')
31 g.custom_command('delete', 'costmanagement_export_delete', confirmation=True)
32
[end of src/costmanagement/azext_costmanagement/manual/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/costmanagement/azext_costmanagement/manual/commands.py b/src/costmanagement/azext_costmanagement/manual/commands.py
--- a/src/costmanagement/azext_costmanagement/manual/commands.py
+++ b/src/costmanagement/azext_costmanagement/manual/commands.py
@@ -13,8 +13,7 @@
operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'
'_query_operations#QueryOperations.{}',
client_factory=cf_query)
- with self.command_group('costmanagement', costmanagement_query,
- client_factory=cf_query, is_experimental=True) as g:
+ with self.command_group('costmanagement', costmanagement_query, client_factory=cf_query) as g:
g.custom_command('query', 'costmanagement_query')
from azext_costmanagement.generated._client_factory import cf_export
@@ -22,8 +21,7 @@
operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'
'ns.{}',
client_factory=cf_export)
- with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,
- is_experimental=True) as g:
+ with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export) as g:
g.custom_command('list', 'costmanagement_export_list')
g.custom_show_command('show', 'costmanagement_export_show')
g.custom_command('create', 'costmanagement_export_create')
diff --git a/src/costmanagement/setup.py b/src/costmanagement/setup.py
--- a/src/costmanagement/setup.py
+++ b/src/costmanagement/setup.py
@@ -11,7 +11,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.0'
+VERSION = '0.1.1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
| {"golden_diff": "diff --git a/src/costmanagement/azext_costmanagement/manual/commands.py b/src/costmanagement/azext_costmanagement/manual/commands.py\n--- a/src/costmanagement/azext_costmanagement/manual/commands.py\n+++ b/src/costmanagement/azext_costmanagement/manual/commands.py\n@@ -13,8 +13,7 @@\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'\n '_query_operations#QueryOperations.{}',\n client_factory=cf_query)\n- with self.command_group('costmanagement', costmanagement_query,\n- client_factory=cf_query, is_experimental=True) as g:\n+ with self.command_group('costmanagement', costmanagement_query, client_factory=cf_query) as g:\n g.custom_command('query', 'costmanagement_query')\n \n from azext_costmanagement.generated._client_factory import cf_export\n@@ -22,8 +21,7 @@\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'\n 'ns.{}',\n client_factory=cf_export)\n- with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,\n- is_experimental=True) as g:\n+ with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export) as g:\n g.custom_command('list', 'costmanagement_export_list')\n g.custom_show_command('show', 'costmanagement_export_show')\n g.custom_command('create', 'costmanagement_export_create')\ndiff --git a/src/costmanagement/setup.py b/src/costmanagement/setup.py\n--- a/src/costmanagement/setup.py\n+++ b/src/costmanagement/setup.py\n@@ -11,7 +11,7 @@\n \r\n # TODO: Confirm this is the right version number you want and it matches your\r\n # HISTORY.rst entry.\r\n-VERSION = '0.1.0'\r\n+VERSION = '0.1.1'\r\n \r\n # The full list of classifiers is available at\r\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: costmanagement\nCommand module `costmanagement` has been released for a long time and is using stable sdk version `2019-11-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\r\n\r\n# --------------------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\r\n# --------------------------------------------------------------------------------------------\r\n\r\n\r\nfrom codecs import open\r\nfrom setuptools import setup, find_packages\r\n\r\n# TODO: Confirm this is the right version number you want and it matches your\r\n# HISTORY.rst entry.\r\nVERSION = '0.1.0'\r\n\r\n# The full list of classifiers is available at\r\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\r\nCLASSIFIERS = [\r\n 'Development Status :: 4 - Beta',\r\n 'Intended Audience :: Developers',\r\n 'Intended Audience :: System Administrators',\r\n 'Programming Language :: Python',\r\n 'Programming Language :: Python :: 3',\r\n 'Programming Language :: Python :: 3.6',\r\n 'Programming Language :: Python :: 3.7',\r\n 'Programming Language :: Python :: 3.8',\r\n 'License :: OSI Approved :: MIT License',\r\n]\r\n\r\n# TODO: Add any additional SDK dependencies here\r\nDEPENDENCIES = []\r\n\r\nwith open('README.md', 'r', encoding='utf-8') as f:\r\n README = f.read()\r\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\r\n HISTORY = f.read()\r\n\r\nsetup(\r\n name='costmanagement',\r\n version=VERSION,\r\n description='Microsoft Azure Command-Line Tools CostManagementClient Extension',\r\n # TODO: Update author and email, if applicable\r\n author='Microsoft Corporation',\r\n author_email='[email protected]',\r\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/costmanagement',\r\n long_description=README + '\\n\\n' + HISTORY,\r\n license='MIT',\r\n classifiers=CLASSIFIERS,\r\n packages=find_packages(),\r\n install_requires=DEPENDENCIES,\r\n package_data={'azext_costmanagement': ['azext_metadata.json']},\r\n)\r\n", "path": "src/costmanagement/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_costmanagement.generated._client_factory import cf_query\n costmanagement_query = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'\n '_query_operations#QueryOperations.{}',\n client_factory=cf_query)\n with self.command_group('costmanagement', costmanagement_query,\n client_factory=cf_query, is_experimental=True) as g:\n g.custom_command('query', 'costmanagement_query')\n\n from azext_costmanagement.generated._client_factory import cf_export\n costmanagement_export = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'\n 'ns.{}',\n client_factory=cf_export)\n with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,\n is_experimental=True) as g:\n g.custom_command('list', 'costmanagement_export_list')\n g.custom_show_command('show', 'costmanagement_export_show')\n g.custom_command('create', 'costmanagement_export_create')\n g.custom_command('update', 'costmanagement_export_update')\n g.custom_command('delete', 'costmanagement_export_delete', confirmation=True)\n", "path": "src/costmanagement/azext_costmanagement/manual/commands.py"}]} | 1,540 | 458 |
gh_patches_debug_38816 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider valero is broken
During the global build at 2021-07-14-14-42-22, spider **valero** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/valero.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson))
</issue>
<code>
[start of locations/spiders/valero.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6
7 class ValeroSpider(scrapy.Spider):
8 name = "valero"
9 item_attributes = {'brand': "Valero", 'brand_wikidata': 'Q1283291'}
10 allowed_domains = ["valeromaps.valero.com"]
11
12 def start_requests(self):
13 yield scrapy.FormRequest(
14 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',
15 method='POST',
16 headers={
17 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
18 },
19 formdata={
20 'NEBound_Lat': '90',
21 'NEBound_Long': '180',
22 'SWBound_Lat': '-90',
23 'SWBound_Long': '-180',
24 'center_Lat': '0',
25 'center_Long': '0'
26 }
27 )
28
29 def parse(self, response):
30 result = json.loads(response.body_as_unicode())
31 for store in result['StoreList']:
32 details = ', '.join([d['DetailName'] for d in store['Details']])
33 yield GeojsonPointItem(
34 lon=store['Longitude'],
35 lat=store['Latitude'],
36 ref=store['UniqueID'],
37 name=store['StationName'],
38 addr_full=store['Address'],
39 phone=store['Phone'],
40 opening_hours='24/7' if '24 Hours' in details else None,
41 extras={
42 'amenity:fuel': True,
43 'amenity:toilets': 'Restroom' in details or None,
44 'atm': 'ATM' in details,
45 'car_wash': 'Car Wash' in details,
46 'fuel:diesel': 'Diesel' in details or None,
47 'fuel:e85': 'E-85' in details or None,
48 }
49 )
50
[end of locations/spiders/valero.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/valero.py b/locations/spiders/valero.py
--- a/locations/spiders/valero.py
+++ b/locations/spiders/valero.py
@@ -1,49 +1,39 @@
# -*- coding: utf-8 -*-
import scrapy
import json
+
from locations.items import GeojsonPointItem
class ValeroSpider(scrapy.Spider):
name = "valero"
- item_attributes = {'brand': "Valero", 'brand_wikidata': 'Q1283291'}
- allowed_domains = ["valeromaps.valero.com"]
-
- def start_requests(self):
- yield scrapy.FormRequest(
- 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',
- method='POST',
- headers={
- 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
- },
- formdata={
- 'NEBound_Lat': '90',
- 'NEBound_Long': '180',
- 'SWBound_Lat': '-90',
- 'SWBound_Long': '-180',
- 'center_Lat': '0',
- 'center_Long': '0'
- }
- )
+ item_attributes = {"brand": "Valero", "brand_wikidata": "Q1283291"}
+ allowed_domains = ["valero.com"]
+ start_urls = ["https://locations.valero.com/sitemap.xml"]
def parse(self, response):
- result = json.loads(response.body_as_unicode())
- for store in result['StoreList']:
- details = ', '.join([d['DetailName'] for d in store['Details']])
- yield GeojsonPointItem(
- lon=store['Longitude'],
- lat=store['Latitude'],
- ref=store['UniqueID'],
- name=store['StationName'],
- addr_full=store['Address'],
- phone=store['Phone'],
- opening_hours='24/7' if '24 Hours' in details else None,
- extras={
- 'amenity:fuel': True,
- 'amenity:toilets': 'Restroom' in details or None,
- 'atm': 'ATM' in details,
- 'car_wash': 'Car Wash' in details,
- 'fuel:diesel': 'Diesel' in details or None,
- 'fuel:e85': 'E-85' in details or None,
- }
- )
+ response.selector.remove_namespaces()
+ for url in response.xpath("//loc/text()").extract():
+ yield scrapy.Request(url, callback=self.parse_store)
+
+ def parse_store(self, response):
+ amenities = [s.strip() for s in response.xpath('//div[@class="amenityIconLabel"]/text()').extract()]
+ properties = {
+ "lat": response.xpath('//meta[@property="place:location:latitude"]/@content').get(),
+ "lon": response.xpath('//meta[@property="place:location:longitude"]/@content').get(),
+ "ref": response.url.rsplit("/", 1)[-1],
+ "website": response.url,
+ "name": response.xpath('normalize-space(//*[@id="pageTitleStoreName"])').get(),
+ "addr_full": response.xpath('normalize-space(//div[@class="locationDetailsContactRow"][1]//br/..)').get(),
+ "phone": response.xpath('//a[contains(@href,"tel:")]/text()').get(),
+ "opening_hours": "24/7" if "24 Hour" in amenities else None,
+ "extras": {
+ "atm": "ATM" in amenities,
+ "amenity:fuel": True,
+ "amenity:toilets": "Public Restroom" in amenities or None,
+ "car_wash": "Car Wash" in amenities,
+ "fuel:diesel": "Diesel" in amenities or None,
+ "fuel:e85": "E-85" in amenities or None,
+ },
+ }
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/valero.py b/locations/spiders/valero.py\n--- a/locations/spiders/valero.py\n+++ b/locations/spiders/valero.py\n@@ -1,49 +1,39 @@\n # -*- coding: utf-8 -*-\n import scrapy\n import json\n+\n from locations.items import GeojsonPointItem\n \n \n class ValeroSpider(scrapy.Spider):\n name = \"valero\"\n- item_attributes = {'brand': \"Valero\", 'brand_wikidata': 'Q1283291'}\n- allowed_domains = [\"valeromaps.valero.com\"]\n-\n- def start_requests(self):\n- yield scrapy.FormRequest(\n- 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',\n- method='POST',\n- headers={\n- 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'\n- },\n- formdata={\n- 'NEBound_Lat': '90',\n- 'NEBound_Long': '180',\n- 'SWBound_Lat': '-90',\n- 'SWBound_Long': '-180',\n- 'center_Lat': '0',\n- 'center_Long': '0'\n- }\n- )\n+ item_attributes = {\"brand\": \"Valero\", \"brand_wikidata\": \"Q1283291\"}\n+ allowed_domains = [\"valero.com\"]\n+ start_urls = [\"https://locations.valero.com/sitemap.xml\"]\n \n def parse(self, response):\n- result = json.loads(response.body_as_unicode())\n- for store in result['StoreList']:\n- details = ', '.join([d['DetailName'] for d in store['Details']])\n- yield GeojsonPointItem(\n- lon=store['Longitude'],\n- lat=store['Latitude'],\n- ref=store['UniqueID'],\n- name=store['StationName'],\n- addr_full=store['Address'],\n- phone=store['Phone'],\n- opening_hours='24/7' if '24 Hours' in details else None,\n- extras={\n- 'amenity:fuel': True,\n- 'amenity:toilets': 'Restroom' in details or None,\n- 'atm': 'ATM' in details,\n- 'car_wash': 'Car Wash' in details,\n- 'fuel:diesel': 'Diesel' in details or None,\n- 'fuel:e85': 'E-85' in details or None,\n- }\n- )\n+ response.selector.remove_namespaces()\n+ for url in response.xpath(\"//loc/text()\").extract():\n+ yield scrapy.Request(url, callback=self.parse_store)\n+\n+ def parse_store(self, response):\n+ amenities = [s.strip() for s in response.xpath('//div[@class=\"amenityIconLabel\"]/text()').extract()]\n+ properties = {\n+ \"lat\": response.xpath('//meta[@property=\"place:location:latitude\"]/@content').get(),\n+ \"lon\": response.xpath('//meta[@property=\"place:location:longitude\"]/@content').get(),\n+ \"ref\": response.url.rsplit(\"/\", 1)[-1],\n+ \"website\": response.url,\n+ \"name\": response.xpath('normalize-space(//*[@id=\"pageTitleStoreName\"])').get(),\n+ \"addr_full\": response.xpath('normalize-space(//div[@class=\"locationDetailsContactRow\"][1]//br/..)').get(),\n+ \"phone\": response.xpath('//a[contains(@href,\"tel:\")]/text()').get(),\n+ \"opening_hours\": \"24/7\" if \"24 Hour\" in amenities else None,\n+ \"extras\": {\n+ \"atm\": \"ATM\" in amenities,\n+ \"amenity:fuel\": True,\n+ \"amenity:toilets\": \"Public Restroom\" in amenities or None,\n+ \"car_wash\": \"Car Wash\" in amenities,\n+ \"fuel:diesel\": \"Diesel\" in amenities or None,\n+ \"fuel:e85\": \"E-85\" in amenities or None,\n+ },\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider valero is broken\nDuring the global build at 2021-07-14-14-42-22, spider **valero** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/valero.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson))\n", "before_files": [{"content": "# -*- 
coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass ValeroSpider(scrapy.Spider):\n name = \"valero\"\n item_attributes = {'brand': \"Valero\", 'brand_wikidata': 'Q1283291'}\n allowed_domains = [\"valeromaps.valero.com\"]\n\n def start_requests(self):\n yield scrapy.FormRequest(\n 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',\n method='POST',\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'\n },\n formdata={\n 'NEBound_Lat': '90',\n 'NEBound_Long': '180',\n 'SWBound_Lat': '-90',\n 'SWBound_Long': '-180',\n 'center_Lat': '0',\n 'center_Long': '0'\n }\n )\n\n def parse(self, response):\n result = json.loads(response.body_as_unicode())\n for store in result['StoreList']:\n details = ', '.join([d['DetailName'] for d in store['Details']])\n yield GeojsonPointItem(\n lon=store['Longitude'],\n lat=store['Latitude'],\n ref=store['UniqueID'],\n name=store['StationName'],\n addr_full=store['Address'],\n phone=store['Phone'],\n opening_hours='24/7' if '24 Hours' in details else None,\n extras={\n 'amenity:fuel': True,\n 'amenity:toilets': 'Restroom' in details or None,\n 'atm': 'ATM' in details,\n 'car_wash': 'Car Wash' in details,\n 'fuel:diesel': 'Diesel' in details or None,\n 'fuel:e85': 'E-85' in details or None,\n }\n )\n", "path": "locations/spiders/valero.py"}]} | 1,248 | 939 |
gh_patches_debug_14564 | rasdani/github-patches | git_diff | WordPress__openverse-api-477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Distinguish between staging & prod when sending data refresh slack updates
## Problem
<!-- Describe a problem solved by this feature; or delete the section entirely. -->
It's not easy to tell based on the slack messages produced by the ingestion server whether a given ingestion is being run in production or staging.
## Description
<!-- Describe the feature and how it solves the problem. -->
We should pre-pend the messages (or change the bot's name) based on which environment the data refresh is running in.
## Alternatives
<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->
We _could_ turn off messages altogether for staging (how it is currently) but that would make it harder for us to know when a staging data refresh is complete.
## Additional context
<!-- Add any other context about the feature here; or delete the section entirely. -->
## Implementation
<!-- Replace the [ ] with [x] to check the box. -->
- [x] 🙋 I would be interested in implementing this feature.
</issue>
<code>
[start of ingestion_server/ingestion_server/slack.py]
1 import logging
2 import os
3
4 import requests
5
6
7 log = logging.getLogger(__name__)
8 SLACK_WEBHOOK = "SLACK_WEBHOOK"
9
10
11 def message(text: str, summary: str = None) -> None:
12 """
13 Send a Slack message to a channel specified by a Slack webhook variable.
14
15 A message is only sent if the SLACK_WEBHOOK environment variable is undefined.
16 """
17 if not (webhook := os.getenv(SLACK_WEBHOOK)):
18 log.debug(
19 f"{SLACK_WEBHOOK} variable not defined, skipping slack message: {text}"
20 )
21 return
22 if not summary:
23 if "\n" in text:
24 summary = "Ingestion server message"
25 else:
26 summary = text
27
28 data = {
29 "blocks": [{"text": {"text": text, "type": "mrkdwn"}, "type": "section"}],
30 "text": summary,
31 "username": "Data Refresh Notification",
32 "icon_emoji": "arrows_counterclockwise",
33 }
34 try:
35 requests.post(webhook, json=data)
36 except Exception as err:
37 log.exception(f"Unable to issue slack message: {err}")
38 pass
39
[end of ingestion_server/ingestion_server/slack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ingestion_server/ingestion_server/slack.py b/ingestion_server/ingestion_server/slack.py
--- a/ingestion_server/ingestion_server/slack.py
+++ b/ingestion_server/ingestion_server/slack.py
@@ -2,6 +2,7 @@
import os
import requests
+from decouple import config
log = logging.getLogger(__name__)
@@ -25,10 +26,12 @@
else:
summary = text
+ environment = config("ENVIRONMENT", default="local")
+
data = {
"blocks": [{"text": {"text": text, "type": "mrkdwn"}, "type": "section"}],
"text": summary,
- "username": "Data Refresh Notification",
+ "username": f"Data Refresh Notification | {environment.upper()}",
"icon_emoji": "arrows_counterclockwise",
}
try:
| {"golden_diff": "diff --git a/ingestion_server/ingestion_server/slack.py b/ingestion_server/ingestion_server/slack.py\n--- a/ingestion_server/ingestion_server/slack.py\n+++ b/ingestion_server/ingestion_server/slack.py\n@@ -2,6 +2,7 @@\n import os\n \n import requests\n+from decouple import config\n \n \n log = logging.getLogger(__name__)\n@@ -25,10 +26,12 @@\n else:\n summary = text\n \n+ environment = config(\"ENVIRONMENT\", default=\"local\")\n+\n data = {\n \"blocks\": [{\"text\": {\"text\": text, \"type\": \"mrkdwn\"}, \"type\": \"section\"}],\n \"text\": summary,\n- \"username\": \"Data Refresh Notification\",\n+ \"username\": f\"Data Refresh Notification | {environment.upper()}\",\n \"icon_emoji\": \"arrows_counterclockwise\",\n }\n try:\n", "issue": "Distinguish between staging & prod when sending data refresh slack updates\n## Problem\r\n<!-- Describe a problem solved by this feature; or delete the section entirely. -->\r\nIt's not easy to tell based on the slack messages produced by the ingestion server whether a given ingestion is being run in production or staging.\r\n\r\n## Description\r\n<!-- Describe the feature and how it solves the problem. -->\r\nWe should pre-pend the messages (or change the bot's name) based on which environment the data refresh is running in. \r\n\r\n## Alternatives\r\n<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->\r\nWe _could_ turn off messages altogether for staging (how it is currently) but that would make it harder for us to know when a staging data refresh is complete.\r\n\r\n## Additional context\r\n<!-- Add any other context about the feature here; or delete the section entirely. -->\r\n\r\n## Implementation\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [x] \ud83d\ude4b I would be interested in implementing this feature.\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\nSLACK_WEBHOOK = \"SLACK_WEBHOOK\"\n\n\ndef message(text: str, summary: str = None) -> None:\n \"\"\"\n Send a Slack message to a channel specified by a Slack webhook variable.\n\n A message is only sent if the SLACK_WEBHOOK environment variable is undefined.\n \"\"\"\n if not (webhook := os.getenv(SLACK_WEBHOOK)):\n log.debug(\n f\"{SLACK_WEBHOOK} variable not defined, skipping slack message: {text}\"\n )\n return\n if not summary:\n if \"\\n\" in text:\n summary = \"Ingestion server message\"\n else:\n summary = text\n\n data = {\n \"blocks\": [{\"text\": {\"text\": text, \"type\": \"mrkdwn\"}, \"type\": \"section\"}],\n \"text\": summary,\n \"username\": \"Data Refresh Notification\",\n \"icon_emoji\": \"arrows_counterclockwise\",\n }\n try:\n requests.post(webhook, json=data)\n except Exception as err:\n log.exception(f\"Unable to issue slack message: {err}\")\n pass\n", "path": "ingestion_server/ingestion_server/slack.py"}]} | 1,084 | 206 |
gh_patches_debug_26399 | rasdani/github-patches | git_diff | quantumlib__Cirq-1503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix `cirq.control` documentation talking about `__control__` instead of `controlled_by`
</issue>
<code>
[start of cirq/protocols/control.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, TYPE_CHECKING, TypeVar, Union, Sequence
16
17 import collections
18
19 from cirq.ops import op_tree
20
21 if TYPE_CHECKING:
22 # pylint: disable=unused-import
23 import cirq
24
25 # This is a special indicator value used by the control method to determine
26 # whether or not the caller provided a 'default' argument.
27 RaiseTypeErrorIfNotProvided = ([],) # type: Any
28
29
30 TDefault = TypeVar('TDefault')
31
32 def control(controllee: Union['cirq.Gate', op_tree.OP_TREE],
33 control_qubits: Sequence['cirq.Qid'] = None,
34 default: Any = RaiseTypeErrorIfNotProvided) -> Any:
35 """Returns a Controlled version of the given value, if defined.
36
37 Controllees define how to be controlled by defining a method
38 __control__(self, control_qubits). Note that the method may return
39 NotImplemented to indicate a particular controlling can't be done.
40
41 Args:
42 controllee: The gate, operation or iterable of operations to control.
43 control_qubits: A list of Qids that would control this controllee.
44 default: Determines the fallback behavior when `controllee` doesn't
45 have a controlling defined. If `default` is not set and the
46 fallback occurs, a TypeError is raised instead.
47
48 Returns:
49 If `controllee` has a __control__ method that returns something besides
50 NotImplemented, that result is returned. For an OP_TREE, transformation
51 is applied at the leaf. Otherwise, if a default value was specified,
52 the default value is returned.
53
54 Raises:
55 TypeError: `controllee` doesn't have a __control__ method (or that
56 method returned NotImplemented) and no `default` was specified.
57 """
58 if control_qubits is None:
59 control_qubits = []
60 controller = getattr(controllee, 'controlled_by', None)
61 result = NotImplemented if controller is None else controller(
62 *control_qubits)
63 if result is not NotImplemented:
64 return result
65
66 if isinstance(controllee, collections.Iterable):
67 return op_tree.transform_op_tree(controllee, op_transformation=
68 lambda op: control(op, control_qubits))
69
70 if default is not RaiseTypeErrorIfNotProvided:
71 return default
72
73 if controller is None:
74 raise TypeError("object of type '{}' has no controlled_by "
75 "method.".format(type(controllee)))
76 raise TypeError("object of type '{}' does have a controlled_by method, "
77 "but it returned NotImplemented.".format(type(controllee)))
78
[end of cirq/protocols/control.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/protocols/control.py b/cirq/protocols/control.py
--- a/cirq/protocols/control.py
+++ b/cirq/protocols/control.py
@@ -35,7 +35,7 @@
"""Returns a Controlled version of the given value, if defined.
Controllees define how to be controlled by defining a method
- __control__(self, control_qubits). Note that the method may return
+ controlled_by(self, control_qubits). Note that the method may return
NotImplemented to indicate a particular controlling can't be done.
Args:
@@ -46,13 +46,13 @@
fallback occurs, a TypeError is raised instead.
Returns:
- If `controllee` has a __control__ method that returns something besides
- NotImplemented, that result is returned. For an OP_TREE, transformation
- is applied at the leaf. Otherwise, if a default value was specified,
- the default value is returned.
+ If `controllee` has a controlled_by method that returns something
+ besides NotImplemented, that result is returned. For an OP_TREE,
+ transformation is applied at the leaf. Otherwise, if a default value
+ was specified, the default value is returned.
Raises:
- TypeError: `controllee` doesn't have a __control__ method (or that
+ TypeError: `controllee` doesn't have a controlled_by method (or that
method returned NotImplemented) and no `default` was specified.
"""
if control_qubits is None:
| {"golden_diff": "diff --git a/cirq/protocols/control.py b/cirq/protocols/control.py\n--- a/cirq/protocols/control.py\n+++ b/cirq/protocols/control.py\n@@ -35,7 +35,7 @@\n \"\"\"Returns a Controlled version of the given value, if defined.\n \n Controllees define how to be controlled by defining a method\n- __control__(self, control_qubits). Note that the method may return\n+ controlled_by(self, control_qubits). Note that the method may return\n NotImplemented to indicate a particular controlling can't be done.\n \n Args:\n@@ -46,13 +46,13 @@\n fallback occurs, a TypeError is raised instead.\n \n Returns:\n- If `controllee` has a __control__ method that returns something besides\n- NotImplemented, that result is returned. For an OP_TREE, transformation\n- is applied at the leaf. Otherwise, if a default value was specified,\n- the default value is returned.\n+ If `controllee` has a controlled_by method that returns something\n+ besides NotImplemented, that result is returned. For an OP_TREE,\n+ transformation is applied at the leaf. Otherwise, if a default value\n+ was specified, the default value is returned.\n \n Raises:\n- TypeError: `controllee` doesn't have a __control__ method (or that\n+ TypeError: `controllee` doesn't have a controlled_by method (or that\n method returned NotImplemented) and no `default` was specified.\n \"\"\"\n if control_qubits is None:\n", "issue": "Fix `cirq.control` documentation talking about `__control__` instead of `controlled_by`\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, TYPE_CHECKING, TypeVar, Union, Sequence\n\nimport collections\n\nfrom cirq.ops import op_tree\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\n# This is a special indicator value used by the control method to determine\n# whether or not the caller provided a 'default' argument.\nRaiseTypeErrorIfNotProvided = ([],) # type: Any\n\n\nTDefault = TypeVar('TDefault')\n\ndef control(controllee: Union['cirq.Gate', op_tree.OP_TREE],\n control_qubits: Sequence['cirq.Qid'] = None,\n default: Any = RaiseTypeErrorIfNotProvided) -> Any:\n \"\"\"Returns a Controlled version of the given value, if defined.\n\n Controllees define how to be controlled by defining a method\n __control__(self, control_qubits). Note that the method may return\n NotImplemented to indicate a particular controlling can't be done.\n\n Args:\n controllee: The gate, operation or iterable of operations to control.\n control_qubits: A list of Qids that would control this controllee.\n default: Determines the fallback behavior when `controllee` doesn't\n have a controlling defined. If `default` is not set and the\n fallback occurs, a TypeError is raised instead.\n\n Returns:\n If `controllee` has a __control__ method that returns something besides\n NotImplemented, that result is returned. For an OP_TREE, transformation\n is applied at the leaf. 
Otherwise, if a default value was specified,\n the default value is returned.\n\n Raises:\n TypeError: `controllee` doesn't have a __control__ method (or that\n method returned NotImplemented) and no `default` was specified.\n \"\"\"\n if control_qubits is None:\n control_qubits = []\n controller = getattr(controllee, 'controlled_by', None)\n result = NotImplemented if controller is None else controller(\n *control_qubits)\n if result is not NotImplemented:\n return result\n\n if isinstance(controllee, collections.Iterable):\n return op_tree.transform_op_tree(controllee, op_transformation=\n lambda op: control(op, control_qubits))\n\n if default is not RaiseTypeErrorIfNotProvided:\n return default\n\n if controller is None:\n raise TypeError(\"object of type '{}' has no controlled_by \"\n \"method.\".format(type(controllee)))\n raise TypeError(\"object of type '{}' does have a controlled_by method, \"\n \"but it returned NotImplemented.\".format(type(controllee)))\n", "path": "cirq/protocols/control.py"}]} | 1,389 | 340 |
gh_patches_debug_13663 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New CSV imports use autogenerated column names.
## Description
<!-- A clear and concise description of what the bug is. -->
According to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
The first row of the CSV should be used as header names by default.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Upload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.
</issue>
<code>
[start of mathesar/serializers.py]
1 from django.urls import reverse
2 from rest_framework import serializers
3
4 from mathesar.models import Table, Schema, DataFile, Database
5
6
7 class NestedTableSerializer(serializers.HyperlinkedModelSerializer):
8 url = serializers.SerializerMethodField()
9
10 class Meta:
11 model = Table
12 fields = ['id', 'name', 'url']
13
14 def get_url(self, obj):
15 request = self.context['request']
16 return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))
17
18
19 class ModelNameField(serializers.CharField):
20 """
21 De-serializes the request field as a string, but serializes the response field as
22 `model.name`. Required to support passing and returing a model name from the
23 endpoint, while also storing the model as a related field.
24 """
25 def to_representation(self, value):
26 return value.name
27
28
29 class SchemaSerializer(serializers.HyperlinkedModelSerializer):
30 tables = NestedTableSerializer(many=True, read_only=True)
31 name = serializers.CharField()
32 database = ModelNameField(max_length=128)
33
34 class Meta:
35 model = Schema
36 fields = ['id', 'name', 'tables', 'database', 'has_dependencies']
37
38
39 class SimpleColumnSerializer(serializers.Serializer):
40 name = serializers.CharField()
41 type = serializers.CharField()
42
43
44 class ColumnSerializer(SimpleColumnSerializer):
45 index = serializers.IntegerField(source='column_index', read_only=True)
46 nullable = serializers.BooleanField(default=True)
47 primary_key = serializers.BooleanField(default=False)
48 valid_target_types = serializers.ListField(read_only=True)
49
50
51 class TableSerializer(serializers.ModelSerializer):
52 columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')
53 records = serializers.SerializerMethodField()
54 name = serializers.CharField()
55 data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())
56
57 class Meta:
58 model = Table
59 fields = ['id', 'name', 'schema', 'created_at', 'updated_at',
60 'columns', 'records', 'data_files', 'has_dependencies']
61
62 def get_records(self, obj):
63 if isinstance(obj, Table):
64 # Only get records if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70
71 class RecordSerializer(serializers.BaseSerializer):
72 def to_representation(self, instance):
73 return instance._asdict()
74
75
76 class RecordListParameterSerializer(serializers.Serializer):
77 filters = serializers.JSONField(required=False, default=[])
78 order_by = serializers.JSONField(required=False, default=[])
79 group_count_by = serializers.JSONField(required=False, default=[])
80
81
82 class DatabaseSerializer(serializers.ModelSerializer):
83 supported_types = serializers.ListField(child=serializers.CharField())
84
85 class Meta:
86 model = Database
87 fields = ['id', 'name', 'deleted', 'supported_types']
88 read_only_fields = ['id', 'name', 'deleted', 'supported_types']
89
90
91 class DataFileSerializer(serializers.ModelSerializer):
92 user = serializers.PrimaryKeyRelatedField(
93 default=serializers.CurrentUserDefault(), read_only=True
94 )
95
96 class Meta:
97 model = DataFile
98 fields = [
99 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'
100 ]
101 extra_kwargs = {'delimiter': {'trim_whitespace': False},
102 'escapechar': {'trim_whitespace': False},
103 'quotechar': {'trim_whitespace': False}}
104 # We only currently support importing to a new table, so setting a table via API is invalid.
105 # User should be set automatically, not submitted via the API.
106 read_only_fields = ['table_imported_to']
107
108 def save(self, **kwargs):
109 """
110 Set user to current user while saving the data file.
111 """
112 current_user = self.fields['user'].get_default()
113 if current_user.is_authenticated:
114 kwargs['user'] = current_user
115 return super().save(**kwargs)
116
[end of mathesar/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/serializers.py b/mathesar/serializers.py
--- a/mathesar/serializers.py
+++ b/mathesar/serializers.py
@@ -100,7 +100,8 @@
]
extra_kwargs = {'delimiter': {'trim_whitespace': False},
'escapechar': {'trim_whitespace': False},
- 'quotechar': {'trim_whitespace': False}}
+ 'quotechar': {'trim_whitespace': False},
+ 'header': {'required': True}}
# We only currently support importing to a new table, so setting a table via API is invalid.
# User should be set automatically, not submitted via the API.
read_only_fields = ['table_imported_to']
| {"golden_diff": "diff --git a/mathesar/serializers.py b/mathesar/serializers.py\n--- a/mathesar/serializers.py\n+++ b/mathesar/serializers.py\n@@ -100,7 +100,8 @@\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n- 'quotechar': {'trim_whitespace': False}}\n+ 'quotechar': {'trim_whitespace': False},\n+ 'header': {'required': True}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n", "issue": "New CSV imports use autogenerated column names.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAccording to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe first row of the CSV should be used as header names by default.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\nUpload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.\n", "before_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. 
Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}]} | 1,795 | 157 |
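Note on the Mathesar record above: the behavioral contract at stake is that a CSV import uses the first row as column headers unless the caller explicitly opts out, and the observed bug is the `column_0`-style fallback firing by default. The sketch below is a minimal, self-contained illustration of that contract in plain Python; the `read_rows` helper and its `header` flag are hypothetical stand-ins for illustration, not Mathesar's actual import code or serializer.

```python
import csv
import io

def read_rows(text, header=True):
    """Parse CSV text into (column_names, rows).

    With header=True the first row supplies the column names; with
    header=False every row is data and names fall back to column_0, column_1, ...
    """
    rows = list(csv.reader(io.StringIO(text)))
    if not rows:
        return [], []
    if header:
        return rows[0], rows[1:]
    names = ["column_{}".format(i) for i in range(len(rows[0]))]
    return names, rows

sample = "name,age\nAda,36\nGrace,47\n"
print(read_rows(sample)[0])                # ['name', 'age']  <- expected default
print(read_rows(sample, header=False)[0])  # ['column_0', 'column_1']  <- the reported bug
```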
gh_patches_debug_4145 | rasdani/github-patches | git_diff | benoitc__gunicorn-1414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gunicorn.pidfile.validate crashes gunicorn when PID exists but is from a different user
```
Traceback (most recent call last):
File "/opt/python2.7/bin/gunicorn", line 11, in <module>
sys.exit(run())
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/wsgiapp.py", line 74, in run
WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py", line 189, in run
super(Application, self).run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py", line 72, in run
Arbiter(self).run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py", line 171, in run
self.start()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py", line 125, in start
self.pidfile.create(self.pid)
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py", line 23, in create
oldpid = self.validate()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py", line 75, in validate
os.kill(wpid, 0)
OSError: [Errno 1] Operation not permitted
```
This happens because the process identified by the pid-file exists, but belongs to a different user than the one starting gunicorn.
(This is with gunicorn 19.3.0)
</issue>
<code>
[start of gunicorn/pidfile.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import errno
7 import os
8 import tempfile
9
10
11 class Pidfile(object):
12 """\
13 Manage a PID file. If a specific name is provided
14 it and '"%s.oldpid" % name' will be used. Otherwise
15 we create a temp file using os.mkstemp.
16 """
17
18 def __init__(self, fname):
19 self.fname = fname
20 self.pid = None
21
22 def create(self, pid):
23 oldpid = self.validate()
24 if oldpid:
25 if oldpid == os.getpid():
26 return
27 msg = "Already running on PID %s (or pid file '%s' is stale)"
28 raise RuntimeError(msg % (oldpid, self.fname))
29
30 self.pid = pid
31
32 # Write pidfile
33 fdir = os.path.dirname(self.fname)
34 if fdir and not os.path.isdir(fdir):
35 raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
36 fd, fname = tempfile.mkstemp(dir=fdir)
37 os.write(fd, ("%s\n" % self.pid).encode('utf-8'))
38 if self.fname:
39 os.rename(fname, self.fname)
40 else:
41 self.fname = fname
42 os.close(fd)
43
44 # set permissions to -rw-r--r--
45 os.chmod(self.fname, 420)
46
47 def rename(self, path):
48 self.unlink()
49 self.fname = path
50 self.create(self.pid)
51
52 def unlink(self):
53 """ delete pidfile"""
54 try:
55 with open(self.fname, "r") as f:
56 pid1 = int(f.read() or 0)
57
58 if pid1 == self.pid:
59 os.unlink(self.fname)
60 except:
61 pass
62
63 def validate(self):
64 """ Validate pidfile and make it stale if needed"""
65 if not self.fname:
66 return
67 try:
68 with open(self.fname, "r") as f:
69 try:
70 wpid = int(f.read())
71 except ValueError:
72 return
73
74 try:
75 os.kill(wpid, 0)
76 return wpid
77 except OSError as e:
78 if e.args[0] == errno.ESRCH:
79 return
80 raise
81 except IOError as e:
82 if e.args[0] == errno.ENOENT:
83 return
84 raise
85
[end of gunicorn/pidfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/pidfile.py b/gunicorn/pidfile.py
--- a/gunicorn/pidfile.py
+++ b/gunicorn/pidfile.py
@@ -75,6 +75,8 @@
os.kill(wpid, 0)
return wpid
except OSError as e:
+ if e.args[0] == errno.EPERM:
+ return wpid
if e.args[0] == errno.ESRCH:
return
raise
| {"golden_diff": "diff --git a/gunicorn/pidfile.py b/gunicorn/pidfile.py\n--- a/gunicorn/pidfile.py\n+++ b/gunicorn/pidfile.py\n@@ -75,6 +75,8 @@\n os.kill(wpid, 0)\n return wpid\n except OSError as e:\n+ if e.args[0] == errno.EPERM:\n+ return wpid\n if e.args[0] == errno.ESRCH:\n return\n raise\n", "issue": "gunicorn.pidfile.validate crashes gunicorn when PID exists but is from a different user\n```\nTraceback (most recent call last):\n File \"/opt/python2.7/bin/gunicorn\", line 11, in <module>\n sys.exit(run())\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/wsgiapp.py\", line 74, in run\n WSGIApplication(\"%(prog)s [OPTIONS] [APP_MODULE]\").run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py\", line 189, in run\n super(Application, self).run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py\", line 72, in run\n Arbiter(self).run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py\", line 171, in run\n self.start()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py\", line 125, in start\n self.pidfile.create(self.pid)\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py\", line 23, in create\n oldpid = self.validate()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py\", line 75, in validate\n os.kill(wpid, 0)\nOSError: [Errno 1] Operation not permitted\n```\n\nThis happens because the process identified by the pid-file exists, but belongs to a different user than the one starting gunicorn.\n\n(This is with gunicorn 19.3.0)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport errno\nimport os\nimport tempfile\n\n\nclass Pidfile(object):\n \"\"\"\\\n Manage a PID file. If a specific name is provided\n it and '\"%s.oldpid\" % name' will be used. Otherwise\n we create a temp file using os.mkstemp.\n \"\"\"\n\n def __init__(self, fname):\n self.fname = fname\n self.pid = None\n\n def create(self, pid):\n oldpid = self.validate()\n if oldpid:\n if oldpid == os.getpid():\n return\n msg = \"Already running on PID %s (or pid file '%s' is stale)\"\n raise RuntimeError(msg % (oldpid, self.fname))\n\n self.pid = pid\n\n # Write pidfile\n fdir = os.path.dirname(self.fname)\n if fdir and not os.path.isdir(fdir):\n raise RuntimeError(\"%s doesn't exist. Can't create pidfile.\" % fdir)\n fd, fname = tempfile.mkstemp(dir=fdir)\n os.write(fd, (\"%s\\n\" % self.pid).encode('utf-8'))\n if self.fname:\n os.rename(fname, self.fname)\n else:\n self.fname = fname\n os.close(fd)\n\n # set permissions to -rw-r--r--\n os.chmod(self.fname, 420)\n\n def rename(self, path):\n self.unlink()\n self.fname = path\n self.create(self.pid)\n\n def unlink(self):\n \"\"\" delete pidfile\"\"\"\n try:\n with open(self.fname, \"r\") as f:\n pid1 = int(f.read() or 0)\n\n if pid1 == self.pid:\n os.unlink(self.fname)\n except:\n pass\n\n def validate(self):\n \"\"\" Validate pidfile and make it stale if needed\"\"\"\n if not self.fname:\n return\n try:\n with open(self.fname, \"r\") as f:\n try:\n wpid = int(f.read())\n except ValueError:\n return\n\n try:\n os.kill(wpid, 0)\n return wpid\n except OSError as e:\n if e.args[0] == errno.ESRCH:\n return\n raise\n except IOError as e:\n if e.args[0] == errno.ENOENT:\n return\n raise\n", "path": "gunicorn/pidfile.py"}]} | 1,613 | 109 |
gh_patches_debug_64391 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5886 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
name=Bankomat should not be added for amenity=atm
It is like `name=ATM`
The same goes for `name=Wpłatomat` (for ATM accepting cash)
https://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549

</issue>
<code>
[start of locations/spiders/santander_pl.py]
1 import chompjs
2 from scrapy import Spider
3
4 from locations.categories import Categories, apply_category, apply_yes_no
5 from locations.dict_parser import DictParser
6 from locations.hours import DAYS, OpeningHours
7 from locations.items import Feature
8
9
10 class SantanderPLSpider(Spider):
11 name = "santander_pl"
12 item_attributes = {"brand": "Santander", "brand_wikidata": "Q806653"}
13 # The "20000000000000" needs to be a valid date time, but it seems it's just there to stop the page being cached by
14 # the CDN. We always get the same data.
15 start_urls = ["https://www.santander.pl/_js_places/time20000000000000/places.js"]
16
17 def parse(self, response, **kwargs):
18 data = chompjs.parse_js_object(response.text)
19 for ref, branch in data["atm"].items():
20 yield self.parse_item(ref, branch, Categories.ATM)
21 for ref, branch in data["branch"].items():
22 yield self.parse_item(ref, branch, Categories.BANK)
23 for ref, branch in data["cashin"].items():
24 item = self.parse_item(ref, branch, Categories.ATM)
25 apply_yes_no("cash_in", item, True)
26 yield item
27
28 @staticmethod
29 def parse_item(ref: str, data: dict, category) -> Feature:
30 data["basicParameters"]["street_address"] = data["basicParameters"].pop("street")
31 item = DictParser.parse(data["basicParameters"])
32 item["ref"] = ref
33
34 if data["open_24h"]:
35 item["opening_hours"] = "24/7"
36 else:
37 item["opening_hours"] = OpeningHours()
38 for day, hours in data["basicParameters"]["opening_hours"].items():
39 start_time, end_time = hours.split("-")
40 item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
41
42 apply_category(category, item)
43
44 return item
45
[end of locations/spiders/santander_pl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py
--- a/locations/spiders/santander_pl.py
+++ b/locations/spiders/santander_pl.py
@@ -39,6 +39,9 @@
start_time, end_time = hours.split("-")
item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
+ if category == Categories.ATM:
+ item["name"] = None
+
apply_category(category, item)
return item
| {"golden_diff": "diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py\n--- a/locations/spiders/santander_pl.py\n+++ b/locations/spiders/santander_pl.py\n@@ -39,6 +39,9 @@\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n \n+ if category == Categories.ATM:\n+ item[\"name\"] = None\n+\n apply_category(category, item)\n \n return item\n", "issue": "name=Bankomat should not be added for amenity=atm\nIt is like `name=ATM`\r\n\r\nThe same goes for `name=Wp\u0142atomat` (for ATM accepting cash)\r\n\r\nhttps://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549\r\n\r\n\r\n\n", "before_files": [{"content": "import chompjs\nfrom scrapy import Spider\n\nfrom locations.categories import Categories, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\nfrom locations.items import Feature\n\n\nclass SantanderPLSpider(Spider):\n name = \"santander_pl\"\n item_attributes = {\"brand\": \"Santander\", \"brand_wikidata\": \"Q806653\"}\n # The \"20000000000000\" needs to be a valid date time, but it seems it's just there to stop the page being cached by\n # the CDN. We always get the same data.\n start_urls = [\"https://www.santander.pl/_js_places/time20000000000000/places.js\"]\n\n def parse(self, response, **kwargs):\n data = chompjs.parse_js_object(response.text)\n for ref, branch in data[\"atm\"].items():\n yield self.parse_item(ref, branch, Categories.ATM)\n for ref, branch in data[\"branch\"].items():\n yield self.parse_item(ref, branch, Categories.BANK)\n for ref, branch in data[\"cashin\"].items():\n item = self.parse_item(ref, branch, Categories.ATM)\n apply_yes_no(\"cash_in\", item, True)\n yield item\n\n @staticmethod\n def parse_item(ref: str, data: dict, category) -> Feature:\n data[\"basicParameters\"][\"street_address\"] = data[\"basicParameters\"].pop(\"street\")\n item = DictParser.parse(data[\"basicParameters\"])\n item[\"ref\"] = ref\n\n if data[\"open_24h\"]:\n item[\"opening_hours\"] = \"24/7\"\n else:\n item[\"opening_hours\"] = OpeningHours()\n for day, hours in data[\"basicParameters\"][\"opening_hours\"].items():\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n\n apply_category(category, item)\n\n return item\n", "path": "locations/spiders/santander_pl.py"}]} | 1,223 | 130 |
gh_patches_debug_24114 | rasdani/github-patches | git_diff | akvo__akvo-rsr-5188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Program PDF report returns an XLS report
# What were you doing?
Follow the steps in test "Download PDF Program overview".
# What should've happened?
A PDF report should've been received in the email, but an XLS report was delivered.
</issue>
<code>
[start of akvo/rsr/views/py_reports/email_report.py]
1 import logging
2
3 from datetime import timedelta
4 from django.db.models import Q
5 from django.utils.timezone import now
6 from akvo.rsr.models import EmailReportJob
7
8 from . import (
9 program_overview_pdf_report,
10 program_overview_excel_report,
11 program_period_labels_overview_pdf_report,
12 results_indicators_with_map_pdf_reports,
13 nuffic_country_level_map_report,
14 )
15
16 TIMEOUT = timedelta(minutes=30)
17 MAX_ATTEMPTS = 3
18 HANDLER = {
19 program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
20 program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
21 program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,
22 results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,
23 nuffic_country_level_map_report.REPORT_NAME: nuffic_country_level_map_report.handle_email_report,
24 }
25
26 logger = logging.getLogger(__name__)
27
28
29 def run_job():
30 pending_jobs = _get_pending_jobs()
31 if not pending_jobs.exists():
32 return
33 job = pending_jobs.first()
34 job.mark_started()
35 try:
36 handler = HANDLER.get(job.report, None)
37 if handler:
38 handler(job.payload, job.recipient)
39 job.mark_finished()
40 except Exception:
41 logger.exception(f'Failed to genereate report {job.report} for {job.recipient}')
42
43
44 def _get_pending_jobs():
45 started_timeout = now() - TIMEOUT
46 return EmailReportJob.objects\
47 .order_by('created_at')\
48 .filter(finished_at__isnull=True)\
49 .exclude(Q(attempts__gte=MAX_ATTEMPTS) | Q(started_at__gte=started_timeout))
50
[end of akvo/rsr/views/py_reports/email_report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/views/py_reports/email_report.py b/akvo/rsr/views/py_reports/email_report.py
--- a/akvo/rsr/views/py_reports/email_report.py
+++ b/akvo/rsr/views/py_reports/email_report.py
@@ -16,7 +16,7 @@
TIMEOUT = timedelta(minutes=30)
MAX_ATTEMPTS = 3
HANDLER = {
- program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
+ program_overview_pdf_report.REPORT_NAME: program_overview_pdf_report.handle_email_report,
program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,
results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,
@@ -35,6 +35,7 @@
try:
handler = HANDLER.get(job.report, None)
if handler:
+ logger.info("Handling job %s for report %s with %s", job.id, job.report)
handler(job.payload, job.recipient)
job.mark_finished()
except Exception:
| {"golden_diff": "diff --git a/akvo/rsr/views/py_reports/email_report.py b/akvo/rsr/views/py_reports/email_report.py\n--- a/akvo/rsr/views/py_reports/email_report.py\n+++ b/akvo/rsr/views/py_reports/email_report.py\n@@ -16,7 +16,7 @@\n TIMEOUT = timedelta(minutes=30)\n MAX_ATTEMPTS = 3\n HANDLER = {\n- program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n+ program_overview_pdf_report.REPORT_NAME: program_overview_pdf_report.handle_email_report,\n program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,\n results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,\n@@ -35,6 +35,7 @@\n try:\n handler = HANDLER.get(job.report, None)\n if handler:\n+ logger.info(\"Handling job %s for report %s with %s\", job.id, job.report)\n handler(job.payload, job.recipient)\n job.mark_finished()\n except Exception:\n", "issue": "Program PDF report returns an XLS report\n# What were you doing?\n\nFollow the steps in test \"Download PDF Program overview\".\n\n# What should've happened?\n\nA PDF report should've been received in the email, but an XLS report was delivered.\n", "before_files": [{"content": "import logging\n\nfrom datetime import timedelta\nfrom django.db.models import Q\nfrom django.utils.timezone import now\nfrom akvo.rsr.models import EmailReportJob\n\nfrom . import (\n program_overview_pdf_report,\n program_overview_excel_report,\n program_period_labels_overview_pdf_report,\n results_indicators_with_map_pdf_reports,\n nuffic_country_level_map_report,\n)\n\nTIMEOUT = timedelta(minutes=30)\nMAX_ATTEMPTS = 3\nHANDLER = {\n program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,\n results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,\n nuffic_country_level_map_report.REPORT_NAME: nuffic_country_level_map_report.handle_email_report,\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_job():\n pending_jobs = _get_pending_jobs()\n if not pending_jobs.exists():\n return\n job = pending_jobs.first()\n job.mark_started()\n try:\n handler = HANDLER.get(job.report, None)\n if handler:\n handler(job.payload, job.recipient)\n job.mark_finished()\n except Exception:\n logger.exception(f'Failed to genereate report {job.report} for {job.recipient}')\n\n\ndef _get_pending_jobs():\n started_timeout = now() - TIMEOUT\n return EmailReportJob.objects\\\n .order_by('created_at')\\\n .filter(finished_at__isnull=True)\\\n .exclude(Q(attempts__gte=MAX_ATTEMPTS) | Q(started_at__gte=started_timeout))\n", "path": "akvo/rsr/views/py_reports/email_report.py"}]} | 1,074 | 270 |
gh_patches_debug_5510 | rasdani/github-patches | git_diff | pyca__cryptography-3215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HKDF key-length inconsistency
For too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:
Program:
```python
#!/usr/bin/env python3.5
import cryptography
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
print("cryptography.io:{}".format(cryptography.__version__))
hkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b"salt",
info=b"some-test", backend=default_backend())
key = hkdf.derive(b"my secret passphrase")
print("Derived key: {}".format(key))
```
Output:
```
cryptography.io:1.5.2
Derived key: b''
```
Suggested fix:
I am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.
```python
# class HKDFExpand
def _expand(self, key_material):
output = [b""]
counter = 1
while (self._algorithm.digest_size // 8) * len(output) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
h.update(six.int2byte(counter))
output.append(h.finalize())
counter += 1
return b"".join(output)[:self._length]
```
</issue>
<code>
[start of src/cryptography/hazmat/primitives/kdf/hkdf.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.exceptions import (
11 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
12 )
13 from cryptography.hazmat.backends.interfaces import HMACBackend
14 from cryptography.hazmat.primitives import constant_time, hmac
15 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
16
17
18 @utils.register_interface(KeyDerivationFunction)
19 class HKDF(object):
20 def __init__(self, algorithm, length, salt, info, backend):
21 if not isinstance(backend, HMACBackend):
22 raise UnsupportedAlgorithm(
23 "Backend object does not implement HMACBackend.",
24 _Reasons.BACKEND_MISSING_INTERFACE
25 )
26
27 self._algorithm = algorithm
28
29 if not (salt is None or isinstance(salt, bytes)):
30 raise TypeError("salt must be bytes.")
31
32 if salt is None:
33 salt = b"\x00" * (self._algorithm.digest_size // 8)
34
35 self._salt = salt
36
37 self._backend = backend
38
39 self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)
40
41 def _extract(self, key_material):
42 h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)
43 h.update(key_material)
44 return h.finalize()
45
46 def derive(self, key_material):
47 if not isinstance(key_material, bytes):
48 raise TypeError("key_material must be bytes.")
49
50 return self._hkdf_expand.derive(self._extract(key_material))
51
52 def verify(self, key_material, expected_key):
53 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
54 raise InvalidKey
55
56
57 @utils.register_interface(KeyDerivationFunction)
58 class HKDFExpand(object):
59 def __init__(self, algorithm, length, info, backend):
60 if not isinstance(backend, HMACBackend):
61 raise UnsupportedAlgorithm(
62 "Backend object does not implement HMACBackend.",
63 _Reasons.BACKEND_MISSING_INTERFACE
64 )
65
66 self._algorithm = algorithm
67
68 self._backend = backend
69
70 max_length = 255 * (algorithm.digest_size // 8)
71
72 if length > max_length:
73 raise ValueError(
74 "Can not derive keys larger than {0} octets.".format(
75 max_length
76 ))
77
78 self._length = length
79
80 if not (info is None or isinstance(info, bytes)):
81 raise TypeError("info must be bytes.")
82
83 if info is None:
84 info = b""
85
86 self._info = info
87
88 self._used = False
89
90 def _expand(self, key_material):
91 output = [b""]
92 counter = 1
93
94 while (self._algorithm.digest_size // 8) * len(output) < self._length:
95 h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
96 h.update(output[-1])
97 h.update(self._info)
98 h.update(six.int2byte(counter))
99 output.append(h.finalize())
100 counter += 1
101
102 return b"".join(output)[:self._length]
103
104 def derive(self, key_material):
105 if not isinstance(key_material, bytes):
106 raise TypeError("key_material must be bytes.")
107
108 if self._used:
109 raise AlreadyFinalized
110
111 self._used = True
112 return self._expand(key_material)
113
114 def verify(self, key_material, expected_key):
115 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
116 raise InvalidKey
117
[end of src/cryptography/hazmat/primitives/kdf/hkdf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py
--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py
+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py
@@ -91,7 +91,7 @@
output = [b""]
counter = 1
- while (self._algorithm.digest_size // 8) * len(output) < self._length:
+ while self._algorithm.digest_size * (len(output) - 1) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py\n+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n@@ -91,7 +91,7 @@\n output = [b\"\"]\n counter = 1\n \n- while (self._algorithm.digest_size // 8) * len(output) < self._length:\n+ while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n", "issue": "HKDF key-length inconsistency\nFor too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:\r\n\r\nProgram:\r\n```python\r\n#!/usr/bin/env python3.5\r\nimport cryptography\r\nfrom cryptography.hazmat.primitives import hashes\r\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\nprint(\"cryptography.io:{}\".format(cryptography.__version__))\r\n\r\nhkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b\"salt\",\r\n info=b\"some-test\", backend=default_backend())\r\n\r\nkey = hkdf.derive(b\"my secret passphrase\")\r\nprint(\"Derived key: {}\".format(key))\r\n```\r\n\r\nOutput:\r\n```\r\ncryptography.io:1.5.2\r\nDerived key: b''\r\n```\r\n\r\nSuggested fix:\r\n\r\nI am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.\r\n\r\n```python\r\n# class HKDFExpand\r\ndef _expand(self, key_material):\r\n output = [b\"\"]\r\n counter = 1\r\n\r\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\r\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\r\n h.update(output[-1])\r\n h.update(self._info)\r\n h.update(six.int2byte(counter))\r\n output.append(h.finalize())\r\n counter += 1\r\n\r\n return b\"\".join(output)[:self._length]\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * (self._algorithm.digest_size // 8)\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}]} | 1,999 | 170 |
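The arithmetic behind the HKDF record above is worth spelling out: SHA-256's digest size is 32 bytes, so the buggy guard `(digest_size // 8) * len(output) < length` evaluates to `4 * 1 < 4` on the first pass for a 4-byte key, the expand loop never runs, and the derived key is the empty `b''` from the report. The sketch below is an independent RFC 5869 expand step written against the stdlib `hmac`/`hashlib` modules (not the `cryptography` package), shown only to illustrate the corrected length bookkeeping with the same inputs as the issue's example.

```python
import hashlib
import hmac

def hkdf_expand(prk, info, length, hash_name="sha256"):
    """RFC 5869 expand: derive `length` output bytes from a pseudorandom key."""
    digest_size = hashlib.new(hash_name).digest_size  # 32 for SHA-256
    if length > 255 * digest_size:
        raise ValueError("requested key too long")
    okm, block, counter = b"", b"", 1
    # Correct guard: count bytes produced so far, not blocks times (bits // 8).
    while len(okm) < length:
        block = hmac.new(prk, block + info + bytes([counter]), hash_name).digest()
        okm += block
        counter += 1
    return okm[:length]

prk = hmac.new(b"salt", b"my secret passphrase", "sha256").digest()  # extract step
print(hkdf_expand(prk, b"some-test", 4).hex())   # 4 bytes, never empty
print(len(hkdf_expand(prk, b"some-test", 100)))  # 100
```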
gh_patches_debug_14774 | rasdani/github-patches | git_diff | speechbrain__speechbrain-124 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replicability Issue
Every time I run the same experiment (e.,g minimal_examples/neural_networks/autoencoder) on the same machine I got slightly different results. Since we set up the seed, this shouldn't happen.
</issue>
<code>
[start of recipes/TIMIT/ASR_CTC/experiment.py]
1 #!/usr/bin/env python3
2 import os
3 import sys
4 import torch
5 import speechbrain as sb
6 import speechbrain.data_io.wer as wer_io
7 import speechbrain.utils.edit_distance as edit_distance
8 from speechbrain.data_io.data_io import convert_index_to_lab
9 from speechbrain.decoders.ctc import ctc_greedy_decode
10 from speechbrain.decoders.decoders import undo_padding
11 from speechbrain.utils.checkpoints import ckpt_recency
12 from speechbrain.utils.train_logger import summarize_error_rate
13
14 # This hack needed to import data preparation script from ..
15 current_dir = os.path.dirname(os.path.abspath(__file__))
16 sys.path.append(os.path.dirname(current_dir))
17 from timit_prepare import TIMITPreparer # noqa E402
18
19 # Load hyperparameters file with command-line overrides
20 params_file, overrides = sb.core.parse_arguments(sys.argv[1:])
21 if "seed" in overrides:
22 torch.manual_seed(overrides["seed"])
23 with open(params_file) as fin:
24 params = sb.yaml.load_extended_yaml(fin, overrides)
25
26 # Create experiment directory
27 sb.core.create_experiment_directory(
28 experiment_directory=params.output_folder,
29 params_to_save=params_file,
30 overrides=overrides,
31 )
32
33
34 # Define training procedure
35 class ASR(sb.core.Brain):
36 def compute_forward(self, x, train_mode=True, init_params=False):
37 ids, wavs, wav_lens = x
38 wavs, wav_lens = wavs.to(params.device), wav_lens.to(params.device)
39 if hasattr(params, "augmentation"):
40 wavs = params.augmentation(wavs, wav_lens, init_params)
41 feats = params.compute_features(wavs, init_params)
42 feats = params.normalize(feats, wav_lens)
43 out = params.model(feats, init_params)
44 out = params.output(out, init_params)
45 pout = params.log_softmax(out)
46 return pout, wav_lens
47
48 def compute_objectives(self, predictions, targets, train_mode=True):
49 pout, pout_lens = predictions
50 ids, phns, phn_lens = targets
51 phns, phn_lens = phns.to(params.device), phn_lens.to(params.device)
52 loss = params.compute_cost(pout, phns, [pout_lens, phn_lens])
53
54 if not train_mode:
55 ind2lab = params.train_loader.label_dict["phn"]["index2lab"]
56 sequence = ctc_greedy_decode(pout, pout_lens, blank_id=-1)
57 sequence = convert_index_to_lab(sequence, ind2lab)
58 phns = undo_padding(phns, phn_lens)
59 phns = convert_index_to_lab(phns, ind2lab)
60 stats = edit_distance.wer_details_for_batch(
61 ids, phns, sequence, compute_alignments=True
62 )
63 stats = {"PER": stats}
64 return loss, stats
65
66 return loss
67
68 def on_epoch_end(self, epoch, train_stats, valid_stats=None):
69 per = summarize_error_rate(valid_stats["PER"])
70 old_lr, new_lr = params.lr_annealing([params.optimizer], epoch, per)
71 epoch_stats = {"epoch": epoch, "lr": old_lr}
72 params.train_logger.log_stats(epoch_stats, train_stats, valid_stats)
73
74 params.checkpointer.save_and_keep_only(
75 meta={"PER": per},
76 importance_keys=[ckpt_recency, lambda c: -c.meta["PER"]],
77 )
78
79
80 # Prepare data
81 prepare = TIMITPreparer(
82 data_folder=params.data_folder,
83 splits=["train", "dev", "test"],
84 save_folder=params.data_folder,
85 )
86 prepare()
87 train_set = params.train_loader()
88 valid_set = params.valid_loader()
89 first_x, first_y = next(zip(*train_set))
90
91 # Modules are passed to optimizer and have train/eval called on them
92 modules = [params.model, params.output]
93 if hasattr(params, "augmentation"):
94 modules.append(params.augmentation)
95
96 # Create brain object for training
97 asr_brain = ASR(
98 modules=modules, optimizer=params.optimizer, first_inputs=[first_x],
99 )
100
101 # Load latest checkpoint to resume training
102 params.checkpointer.recover_if_possible()
103 asr_brain.fit(params.epoch_counter, train_set, valid_set)
104
105 # Load best checkpoint for evaluation
106 params.checkpointer.recover_if_possible(lambda c: -c.meta["PER"])
107 test_stats = asr_brain.evaluate(params.test_loader())
108 params.train_logger.log_stats(
109 stats_meta={"Epoch loaded": params.epoch_counter.current},
110 test_stats=test_stats,
111 )
112
113 # Write alignments to file
114 per_summary = edit_distance.wer_summary(test_stats["PER"])
115 with open(params.wer_file, "w") as fo:
116 wer_io.print_wer_summary(per_summary, fo)
117 wer_io.print_alignments(test_stats["PER"], fo)
118
[end of recipes/TIMIT/ASR_CTC/experiment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/TIMIT/ASR_CTC/experiment.py b/recipes/TIMIT/ASR_CTC/experiment.py
--- a/recipes/TIMIT/ASR_CTC/experiment.py
+++ b/recipes/TIMIT/ASR_CTC/experiment.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import os
import sys
-import torch
import speechbrain as sb
import speechbrain.data_io.wer as wer_io
import speechbrain.utils.edit_distance as edit_distance
@@ -18,8 +17,6 @@
# Load hyperparameters file with command-line overrides
params_file, overrides = sb.core.parse_arguments(sys.argv[1:])
-if "seed" in overrides:
- torch.manual_seed(overrides["seed"])
with open(params_file) as fin:
params = sb.yaml.load_extended_yaml(fin, overrides)
| {"golden_diff": "diff --git a/recipes/TIMIT/ASR_CTC/experiment.py b/recipes/TIMIT/ASR_CTC/experiment.py\n--- a/recipes/TIMIT/ASR_CTC/experiment.py\n+++ b/recipes/TIMIT/ASR_CTC/experiment.py\n@@ -1,7 +1,6 @@\n #!/usr/bin/env python3\n import os\n import sys\n-import torch\n import speechbrain as sb\n import speechbrain.data_io.wer as wer_io\n import speechbrain.utils.edit_distance as edit_distance\n@@ -18,8 +17,6 @@\n \n # Load hyperparameters file with command-line overrides\n params_file, overrides = sb.core.parse_arguments(sys.argv[1:])\n-if \"seed\" in overrides:\n- torch.manual_seed(overrides[\"seed\"])\n with open(params_file) as fin:\n params = sb.yaml.load_extended_yaml(fin, overrides)\n", "issue": "Replicability Issue\nEvery time I run the same experiment (e.,g minimal_examples/neural_networks/autoencoder) on the same machine I got slightly different results. Since we set up the seed, this shouldn't happen. \n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport torch\nimport speechbrain as sb\nimport speechbrain.data_io.wer as wer_io\nimport speechbrain.utils.edit_distance as edit_distance\nfrom speechbrain.data_io.data_io import convert_index_to_lab\nfrom speechbrain.decoders.ctc import ctc_greedy_decode\nfrom speechbrain.decoders.decoders import undo_padding\nfrom speechbrain.utils.checkpoints import ckpt_recency\nfrom speechbrain.utils.train_logger import summarize_error_rate\n\n# This hack needed to import data preparation script from ..\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(current_dir))\nfrom timit_prepare import TIMITPreparer # noqa E402\n\n# Load hyperparameters file with command-line overrides\nparams_file, overrides = sb.core.parse_arguments(sys.argv[1:])\nif \"seed\" in overrides:\n torch.manual_seed(overrides[\"seed\"])\nwith open(params_file) as fin:\n params = sb.yaml.load_extended_yaml(fin, overrides)\n\n# Create experiment directory\nsb.core.create_experiment_directory(\n experiment_directory=params.output_folder,\n params_to_save=params_file,\n overrides=overrides,\n)\n\n\n# Define training procedure\nclass ASR(sb.core.Brain):\n def compute_forward(self, x, train_mode=True, init_params=False):\n ids, wavs, wav_lens = x\n wavs, wav_lens = wavs.to(params.device), wav_lens.to(params.device)\n if hasattr(params, \"augmentation\"):\n wavs = params.augmentation(wavs, wav_lens, init_params)\n feats = params.compute_features(wavs, init_params)\n feats = params.normalize(feats, wav_lens)\n out = params.model(feats, init_params)\n out = params.output(out, init_params)\n pout = params.log_softmax(out)\n return pout, wav_lens\n\n def compute_objectives(self, predictions, targets, train_mode=True):\n pout, pout_lens = predictions\n ids, phns, phn_lens = targets\n phns, phn_lens = phns.to(params.device), phn_lens.to(params.device)\n loss = params.compute_cost(pout, phns, [pout_lens, phn_lens])\n\n if not train_mode:\n ind2lab = params.train_loader.label_dict[\"phn\"][\"index2lab\"]\n sequence = ctc_greedy_decode(pout, pout_lens, blank_id=-1)\n sequence = convert_index_to_lab(sequence, ind2lab)\n phns = undo_padding(phns, phn_lens)\n phns = convert_index_to_lab(phns, ind2lab)\n stats = edit_distance.wer_details_for_batch(\n ids, phns, sequence, compute_alignments=True\n )\n stats = {\"PER\": stats}\n return loss, stats\n\n return loss\n\n def on_epoch_end(self, epoch, train_stats, valid_stats=None):\n per = summarize_error_rate(valid_stats[\"PER\"])\n old_lr, new_lr = 
params.lr_annealing([params.optimizer], epoch, per)\n epoch_stats = {\"epoch\": epoch, \"lr\": old_lr}\n params.train_logger.log_stats(epoch_stats, train_stats, valid_stats)\n\n params.checkpointer.save_and_keep_only(\n meta={\"PER\": per},\n importance_keys=[ckpt_recency, lambda c: -c.meta[\"PER\"]],\n )\n\n\n# Prepare data\nprepare = TIMITPreparer(\n data_folder=params.data_folder,\n splits=[\"train\", \"dev\", \"test\"],\n save_folder=params.data_folder,\n)\nprepare()\ntrain_set = params.train_loader()\nvalid_set = params.valid_loader()\nfirst_x, first_y = next(zip(*train_set))\n\n# Modules are passed to optimizer and have train/eval called on them\nmodules = [params.model, params.output]\nif hasattr(params, \"augmentation\"):\n modules.append(params.augmentation)\n\n# Create brain object for training\nasr_brain = ASR(\n modules=modules, optimizer=params.optimizer, first_inputs=[first_x],\n)\n\n# Load latest checkpoint to resume training\nparams.checkpointer.recover_if_possible()\nasr_brain.fit(params.epoch_counter, train_set, valid_set)\n\n# Load best checkpoint for evaluation\nparams.checkpointer.recover_if_possible(lambda c: -c.meta[\"PER\"])\ntest_stats = asr_brain.evaluate(params.test_loader())\nparams.train_logger.log_stats(\n stats_meta={\"Epoch loaded\": params.epoch_counter.current},\n test_stats=test_stats,\n)\n\n# Write alignments to file\nper_summary = edit_distance.wer_summary(test_stats[\"PER\"])\nwith open(params.wer_file, \"w\") as fo:\n wer_io.print_wer_summary(per_summary, fo)\n wer_io.print_alignments(test_stats[\"PER\"], fo)\n", "path": "recipes/TIMIT/ASR_CTC/experiment.py"}]} | 1,859 | 189 |
gh_patches_debug_32284 | rasdani/github-patches | git_diff | getnikola__nikola-1011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reST listings can’t number lines, while code-blocks do it incompatibly
1. One can’t do a `.. listing::` in Nikola with line numbers.
2. In `.. code-block::`s, one must use the reST `:number-lines:` option syntax. However, Sphinx wants `:linenos:` instead. (#770)
</issue>
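For background, a minimal sketch of how a docutils directive can accept the Sphinx spelling as an alias of the docutils one (the option names come from the issue; the helper and its wiring are illustrative, not Nikola's actual code):

```
from docutils.parsers.rst import directives

# Accept both spellings in a directive's option_spec.
option_spec = {
    'number-lines': directives.unchanged,  # docutils spelling
    'linenos': directives.unchanged,       # Sphinx spelling
}

def normalise_line_number_options(options):
    """Map ':linenos:' onto ':number-lines:' so one code path handles both."""
    if 'linenos' in options and 'number-lines' not in options:
        options['number-lines'] = options['linenos']
    return options
```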
<code>
[start of nikola/plugins/compile/rest/listing.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27
28 """ Define and register a listing directive using the existing CodeBlock """
29
30
31 from __future__ import unicode_literals
32 from codecs import open as codecs_open # for patching purposes
33 import os
34 try:
35 from urlparse import urlunsplit
36 except ImportError:
37 from urllib.parse import urlunsplit # NOQA
38
39 from docutils import core
40 from docutils import nodes
41 from docutils.parsers.rst import Directive, directives
42 from docutils.parsers.rst.directives.misc import Include
43 try:
44 from docutils.parsers.rst.directives.body import CodeBlock
45 except ImportError: # docutils < 0.9 (Debian Sid For The Loss)
46 class CodeBlock(Directive):
47 required_arguments = 1
48 has_content = True
49 CODE = '<pre>{0}</pre>'
50
51 def run(self):
52 """ Required by the Directive interface. Create docutils nodes """
53 return [nodes.raw('', self.CODE.format('\n'.join(self.content)), format='html')]
54 directives.register_directive('code', CodeBlock)
55
56
57 from nikola.plugin_categories import RestExtension
58
59
60 class Plugin(RestExtension):
61
62 name = "rest_listing"
63
64 def set_site(self, site):
65 self.site = site
66 # Even though listings don't use CodeBlock anymore, I am
67 # leaving these to make the code directive work with
68 # docutils < 0.9
69 directives.register_directive('code-block', CodeBlock)
70 directives.register_directive('sourcecode', CodeBlock)
71 directives.register_directive('listing', Listing)
72 return super(Plugin, self).set_site(site)
73
74
75 class Listing(Include):
76 """ listing directive: create a highlighted block of code from a file in listings/
77
78 Usage:
79
80 .. listing:: nikola.py python
81 :number-lines:
82
83 """
84 has_content = False
85 required_arguments = 1
86 optional_arguments = 1
87
88 def run(self):
89 fname = self.arguments.pop(0)
90 lang = self.arguments.pop(0)
91 fpath = os.path.join('listings', fname)
92 self.arguments.insert(0, fpath)
93 self.options['code'] = lang
94 with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
95 self.content = fileobject.read().splitlines()
96 self.state.document.settings.record_dependencies.add(fpath)
97 target = urlunsplit(("link", 'listing', fname, '', ''))
98 generated_nodes = (
99 [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])
100 generated_nodes += self.get_code_from_file(fileobject)
101 return generated_nodes
102
103 def get_code_from_file(self, data):
104 """ Create CodeBlock nodes from file object content """
105 return super(Listing, self).run()
106
107 def assert_has_content(self):
108 """ Listing has no content, override check from superclass """
109 pass
110
[end of nikola/plugins/compile/rest/listing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py
--- a/nikola/plugins/compile/rest/listing.py
+++ b/nikola/plugins/compile/rest/listing.py
@@ -56,6 +56,18 @@
from nikola.plugin_categories import RestExtension
+# Add sphinx compatibility option
+CodeBlock.option_spec['linenos'] = directives.unchanged
+
+
+class FlexibleCodeBlock(CodeBlock):
+
+ def run(self):
+ if 'linenos' in self.options:
+ self.options['number-lines'] = self.options['linenos']
+ return super(FlexibleCodeBlock, self).run()
+CodeBlock = FlexibleCodeBlock
+
class Plugin(RestExtension):
@@ -71,6 +83,11 @@
directives.register_directive('listing', Listing)
return super(Plugin, self).set_site(site)
+# Add sphinx compatibility option
+listing_spec = Include.option_spec
+listing_spec['linenos'] = directives.unchanged
+print(listing_spec)
+
class Listing(Include):
""" listing directive: create a highlighted block of code from a file in listings/
@@ -84,6 +101,7 @@
has_content = False
required_arguments = 1
optional_arguments = 1
+ option_spec = listing_spec
def run(self):
fname = self.arguments.pop(0)
@@ -91,6 +109,8 @@
fpath = os.path.join('listings', fname)
self.arguments.insert(0, fpath)
self.options['code'] = lang
+ if 'linenos' in self.options:
+ self.options['number-lines'] = self.options['linenos']
with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
self.content = fileobject.read().splitlines()
self.state.document.settings.record_dependencies.add(fpath)
| {"golden_diff": "diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py\n--- a/nikola/plugins/compile/rest/listing.py\n+++ b/nikola/plugins/compile/rest/listing.py\n@@ -56,6 +56,18 @@\n \n from nikola.plugin_categories import RestExtension\n \n+# Add sphinx compatibility option\n+CodeBlock.option_spec['linenos'] = directives.unchanged\n+\n+\n+class FlexibleCodeBlock(CodeBlock):\n+\n+ def run(self):\n+ if 'linenos' in self.options:\n+ self.options['number-lines'] = self.options['linenos']\n+ return super(FlexibleCodeBlock, self).run()\n+CodeBlock = FlexibleCodeBlock\n+\n \n class Plugin(RestExtension):\n \n@@ -71,6 +83,11 @@\n directives.register_directive('listing', Listing)\n return super(Plugin, self).set_site(site)\n \n+# Add sphinx compatibility option\n+listing_spec = Include.option_spec\n+listing_spec['linenos'] = directives.unchanged\n+print(listing_spec)\n+\n \n class Listing(Include):\n \"\"\" listing directive: create a highlighted block of code from a file in listings/\n@@ -84,6 +101,7 @@\n has_content = False\n required_arguments = 1\n optional_arguments = 1\n+ option_spec = listing_spec\n \n def run(self):\n fname = self.arguments.pop(0)\n@@ -91,6 +109,8 @@\n fpath = os.path.join('listings', fname)\n self.arguments.insert(0, fpath)\n self.options['code'] = lang\n+ if 'linenos' in self.options:\n+ self.options['number-lines'] = self.options['linenos']\n with codecs_open(fpath, 'rb+', 'utf8') as fileobject:\n self.content = fileobject.read().splitlines()\n self.state.document.settings.record_dependencies.add(fpath)\n", "issue": "reST listings can\u2019t number lines, while code-blocks do it incompatibly\n1. One can\u2019t do a `.. listing::` in Nikola with line numbers.\n2. In `.. code-block::`s, one must use the reST `:number-lines:` option syntax. However, Sphinx wants `:linenos:` instead. (#770)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\" Define and register a listing directive using the existing CodeBlock \"\"\"\n\n\nfrom __future__ import unicode_literals\nfrom codecs import open as codecs_open # for patching purposes\nimport os\ntry:\n from urlparse import urlunsplit\nexcept ImportError:\n from urllib.parse import urlunsplit # NOQA\n\nfrom docutils import core\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom docutils.parsers.rst.directives.misc import Include\ntry:\n from docutils.parsers.rst.directives.body import CodeBlock\nexcept ImportError: # docutils < 0.9 (Debian Sid For The Loss)\n class CodeBlock(Directive):\n required_arguments = 1\n has_content = True\n CODE = '<pre>{0}</pre>'\n\n def run(self):\n \"\"\" Required by the Directive interface. Create docutils nodes \"\"\"\n return [nodes.raw('', self.CODE.format('\\n'.join(self.content)), format='html')]\n directives.register_directive('code', CodeBlock)\n\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n\n name = \"rest_listing\"\n\n def set_site(self, site):\n self.site = site\n # Even though listings don't use CodeBlock anymore, I am\n # leaving these to make the code directive work with\n # docutils < 0.9\n directives.register_directive('code-block', CodeBlock)\n directives.register_directive('sourcecode', CodeBlock)\n directives.register_directive('listing', Listing)\n return super(Plugin, self).set_site(site)\n\n\nclass Listing(Include):\n \"\"\" listing directive: create a highlighted block of code from a file in listings/\n\n Usage:\n\n .. listing:: nikola.py python\n :number-lines:\n\n \"\"\"\n has_content = False\n required_arguments = 1\n optional_arguments = 1\n\n def run(self):\n fname = self.arguments.pop(0)\n lang = self.arguments.pop(0)\n fpath = os.path.join('listings', fname)\n self.arguments.insert(0, fpath)\n self.options['code'] = lang\n with codecs_open(fpath, 'rb+', 'utf8') as fileobject:\n self.content = fileobject.read().splitlines()\n self.state.document.settings.record_dependencies.add(fpath)\n target = urlunsplit((\"link\", 'listing', fname, '', ''))\n generated_nodes = (\n [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])\n generated_nodes += self.get_code_from_file(fileobject)\n return generated_nodes\n\n def get_code_from_file(self, data):\n \"\"\" Create CodeBlock nodes from file object content \"\"\"\n return super(Listing, self).run()\n\n def assert_has_content(self):\n \"\"\" Listing has no content, override check from superclass \"\"\"\n pass\n", "path": "nikola/plugins/compile/rest/listing.py"}]} | 1,729 | 431 |
gh_patches_debug_4104 | rasdani/github-patches | git_diff | google__TensorNetwork-250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python version requirement
Our current setup file still accepts python>=3.5, even though we have stopped testing against it and Jax appears to have discontinued support as well. Should we not be stricter in the setup, especially if we move to Jax as the default?
</issue>
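For reference, a minimal sketch of the stricter constraint being proposed; only the `python_requires` argument matters here, and the other fields are placeholders:

```
from setuptools import setup

setup(
    name="example-package",
    version="0.0.0",
    python_requires=">=3.6.0",  # refuse to install on Python 3.5 and older
)
```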
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright 2019 The TensorNetwork Developers
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # https://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from setuptools import find_packages, setup
17
18 # This reads the __version__ variable from tensornetwork/version.py
19 with open('tensornetwork/version.py') as f:
20 exec(f.read(), globals())
21
22 description = ('A high level tensor network API for tensorflow.')
23
24 # Reading long Description from README.md file.
25 with open("README.md", "r") as fh:
26 long_description = fh.read()
27
28 # Read in requirements
29 requirements = [
30 requirement.strip() for requirement in open('requirements.txt').readlines()
31 ]
32
33 setup(
34 name='tensornetwork',
35 version=__version__,
36 url='http://github.com/google/TensorNetwork',
37 author='The TensorNetwork Developers',
38 author_email='[email protected]',
39 python_requires=('>=3.5.0'),
40 install_requires=requirements,
41 license='Apache 2.0',
42 description=description,
43 long_description=long_description,
44 long_description_content_type="text/markdown",
45 packages=find_packages(),
46 )
47
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
url='http://github.com/google/TensorNetwork',
author='The TensorNetwork Developers',
author_email='[email protected]',
- python_requires=('>=3.5.0'),
+ python_requires=('>=3.6.0'),
install_requires=requirements,
license='Apache 2.0',
description=description,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork Developers',\n author_email='[email protected]',\n- python_requires=('>=3.5.0'),\n+ python_requires=('>=3.6.0'),\n install_requires=requirements,\n license='Apache 2.0',\n description=description,\n", "issue": "Python version requirement\nOur current setup file is still happy with python>=3.5, whereas we stopped testing for it and it seems that Jax also discontinued support. Should we not be stricter in the setup especially if we move to Jax as default?\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2019 The TensorNetwork Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from tensornetwork/version.py\nwith open('tensornetwork/version.py') as f:\n exec(f.read(), globals())\n\ndescription = ('A high level tensor network API for tensorflow.')\n\n# Reading long Description from README.md file.\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# Read in requirements\nrequirements = [\n requirement.strip() for requirement in open('requirements.txt').readlines()\n]\n\nsetup(\n name='tensornetwork',\n version=__version__,\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork Developers',\n author_email='[email protected]',\n python_requires=('>=3.5.0'),\n install_requires=requirements,\n license='Apache 2.0',\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n)\n", "path": "setup.py"}]} | 1,019 | 108 |
gh_patches_debug_1639 | rasdani/github-patches | git_diff | joke2k__faker-318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Access to the Generator.random
It would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in a seed, but there is currently no way to find out what the seed/state is when the seed is generated automatically. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail.
That is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable)
For now, the workaround appears to be to create a Faker object with your own Generator.
</issue>
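A minimal sketch of the log-the-seed workaround mentioned above; the `fake.seed(...)` call assumes the classic Faker API of this era and is illustrative only:

```
import random

from faker import Faker

# Choose the seed ourselves so it can be printed if a test fails,
# instead of relying on the hidden module-level Random() in faker.generator.
seed = random.SystemRandom().randint(0, 2 ** 32 - 1)
print("Faker seed for this run:", seed)

fake = Faker()
fake.seed(seed)
print(fake.name())
```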
<code>
[start of faker/generator.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import re
6 import random
7
8
9 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
10 random = random.Random()
11
12
13 class Generator(object):
14
15 __config = {}
16
17 def __init__(self, **config):
18 self.providers = []
19 self.__config = dict(
20 list(self.__config.items()) + list(config.items()))
21
22 def add_provider(self, provider):
23
24 if type(provider) is type:
25 provider = provider(self)
26
27 self.providers.insert(0, provider)
28
29 for method_name in dir(provider):
30 # skip 'private' method
31 if method_name.startswith('_'):
32 continue
33
34 faker_function = getattr(provider, method_name)
35
36 if hasattr(faker_function, '__call__') or \
37 isinstance(faker_function, (classmethod, staticmethod)):
38 # add all faker method to generator
39 self.set_formatter(method_name, faker_function)
40
41 def provider(self, name):
42 try:
43 lst = [p for p in self.get_providers()
44 if p.__provider__ == name.lower()]
45 return lst[0]
46 except IndexError:
47 return None
48
49 def get_providers(self):
50 """Returns added providers."""
51 return self.providers
52
53 def seed(self, seed=None):
54 """Calls random.seed"""
55 random.seed(seed)
56
57 def format(self, formatter, *args, **kwargs):
58 """
59 This is a secure way to make a fake from another Provider.
60 """
61 # TODO: data export?
62 return self.get_formatter(formatter)(*args, **kwargs)
63
64 def get_formatter(self, formatter):
65 try:
66 return getattr(self, formatter)
67 except AttributeError:
68 raise AttributeError('Unknown formatter "{0}"'.format(formatter))
69
70 def set_formatter(self, name, method):
71 """
72 This method adds a provider method to generator.
73 Override this method to add some decoration or logging stuff.
74 """
75 setattr(self, name, method)
76
77 def parse(self, text):
78 """
79 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
80 with the result from the token method call.
81 """
82 return _re_token.sub(self.__format_token, text)
83
84 def __format_token(self, matches):
85 formatter = list(matches.groups())
86 formatter[1] = self.format(formatter[1])
87 return ''.join(formatter)
88
[end of faker/generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -50,6 +50,10 @@
"""Returns added providers."""
return self.providers
+ @property
+ def random(self):
+ return random
+
def seed(self, seed=None):
"""Calls random.seed"""
random.seed(seed)
| {"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -50,6 +50,10 @@\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n \n+ @property\n+ def random(self):\n+ return random\n+\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n", "issue": "Access to the Generator.random\nIt would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but one currently has no way of gathering what the seed/state is if using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail.\n\nThat is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable)\n\nFor now, the workaround appears to be to create a Faker object with your own Generator.\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n raise AttributeError('Unknown formatter \"{0}\"'.format(formatter))\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py"}]} | 1,378 | 93 |
gh_patches_debug_12588 | rasdani/github-patches | git_diff | goauthentik__authentik-9255 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The SCIM JSON for User and Group schema attribute allows only one value
The `schemas` attribute in the SCIM JSON for User and Group allows only one value. The schemas attribute check should only enforce a minimum number of entries, not a maximum.
For example, I would like to be able to support the following:
"schemas": [
"urn:ietf:params:scim:schemas:extension:servicenow:2.0:User",
"urn:ietf:params:scim:schemas:core:2.0:User",
"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"
]
To Reproduce
Steps to reproduce the behavior:
Create Property mapping, add
return {
"schemas" : [
"urn:ietf:params:scim:schemas:core:2.0:User",
"urn:ietf:params:scim:schemas:extention:enterprise:2.0:User"
],
"userName": request.user.username,
"name": {
"formatted": request.user.name,
"givenName": givenName,
"familyName": familyName,
},
Expected behavior
JSON containing schemas attribute
Instead I get
Stopping sync due to error: Error 1 validation error for User schemas Tuple should have at most 1 item after validation, not 2 [type=too_long, input_value=['urn:ietf:params:scim:sc...on:enterprise:2.0:User'], input_type=list] For further information visit https://errors.pydantic.dev/2.4/v/too_long
Version and Deployment (please complete the following information):
authentik version: [2023.10.2]
Deployment: [docker-compose]
</issue>
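A small, self-contained illustration of why the current annotation rejects a second entry, assuming pydantic v2 semantics and independent of authentik itself:

```
from pydantic import BaseModel, ValidationError

class StrictUser(BaseModel):
    # Fixed-length tuple: pydantic v2 reads this as "exactly one string".
    schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)

class RelaxedUser(BaseModel):
    # Variable-length list: any number of schema URIs.
    schemas: list[str] = ["urn:ietf:params:scim:schemas:core:2.0:User"]

two = [
    "urn:ietf:params:scim:schemas:core:2.0:User",
    "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User",
]

print(RelaxedUser(schemas=two).schemas)  # accepted

try:
    StrictUser(schemas=tuple(two))
except ValidationError as exc:
    print(exc)  # "Tuple should have at most 1 item after validation, not 2"
```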
<code>
[start of authentik/providers/scim/clients/schema.py]
1 """Custom SCIM schemas"""
2
3 from pydanticscim.group import Group as BaseGroup
4 from pydanticscim.responses import PatchRequest as BasePatchRequest
5 from pydanticscim.responses import SCIMError as BaseSCIMError
6 from pydanticscim.service_provider import Bulk, ChangePassword, Filter, Patch, Sort
7 from pydanticscim.service_provider import (
8 ServiceProviderConfiguration as BaseServiceProviderConfiguration,
9 )
10 from pydanticscim.user import User as BaseUser
11
12
13 class User(BaseUser):
14 """Modified User schema with added externalId field"""
15
16 schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)
17 externalId: str | None = None
18
19
20 class Group(BaseGroup):
21 """Modified Group schema with added externalId field"""
22
23 schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:Group",)
24 externalId: str | None = None
25
26
27 class ServiceProviderConfiguration(BaseServiceProviderConfiguration):
28 """ServiceProviderConfig with fallback"""
29
30 _is_fallback: bool | None = False
31
32 @property
33 def is_fallback(self) -> bool:
34 """Check if this service provider config was retrieved from the API endpoint
35 or a fallback was used"""
36 return self._is_fallback
37
38 @staticmethod
39 def default() -> "ServiceProviderConfiguration":
40 """Get default configuration, which doesn't support any optional features as fallback"""
41 return ServiceProviderConfiguration(
42 patch=Patch(supported=False),
43 bulk=Bulk(supported=False),
44 filter=Filter(supported=False),
45 changePassword=ChangePassword(supported=False),
46 sort=Sort(supported=False),
47 authenticationSchemes=[],
48 _is_fallback=True,
49 )
50
51
52 class PatchRequest(BasePatchRequest):
53 """PatchRequest which correctly sets schemas"""
54
55 schemas: tuple[str] = ("urn:ietf:params:scim:api:messages:2.0:PatchOp",)
56
57
58 class SCIMError(BaseSCIMError):
59 """SCIM error with optional status code"""
60
61 status: int | None
62
[end of authentik/providers/scim/clients/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/providers/scim/clients/schema.py b/authentik/providers/scim/clients/schema.py
--- a/authentik/providers/scim/clients/schema.py
+++ b/authentik/providers/scim/clients/schema.py
@@ -13,14 +13,18 @@
class User(BaseUser):
"""Modified User schema with added externalId field"""
- schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)
+ schemas: list[str] = [
+ "urn:ietf:params:scim:schemas:core:2.0:User",
+ ]
externalId: str | None = None
class Group(BaseGroup):
"""Modified Group schema with added externalId field"""
- schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:Group",)
+ schemas: list[str] = [
+ "urn:ietf:params:scim:schemas:core:2.0:Group",
+ ]
externalId: str | None = None
| {"golden_diff": "diff --git a/authentik/providers/scim/clients/schema.py b/authentik/providers/scim/clients/schema.py\n--- a/authentik/providers/scim/clients/schema.py\n+++ b/authentik/providers/scim/clients/schema.py\n@@ -13,14 +13,18 @@\n class User(BaseUser):\n \"\"\"Modified User schema with added externalId field\"\"\"\n \n- schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:User\",)\n+ schemas: list[str] = [\n+ \"urn:ietf:params:scim:schemas:core:2.0:User\",\n+ ]\n externalId: str | None = None\n \n \n class Group(BaseGroup):\n \"\"\"Modified Group schema with added externalId field\"\"\"\n \n- schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:Group\",)\n+ schemas: list[str] = [\n+ \"urn:ietf:params:scim:schemas:core:2.0:Group\",\n+ ]\n externalId: str | None = None\n", "issue": "The SCIM JSON for User and Group schema attribute allows only one value\nThe SCIM JSON for User and Group schema attribute allows only one value. The schemas attribute check should only check for a minimum number of entries not for a maximum. \r\n\r\nFor example I would like to be able so support the following\r\n\r\n \"schemas\": [\r\n \"urn:ietf:params:scim:schemas:extension:servicenow:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:core:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\"\r\n ]\r\n\r\nTo Reproduce\r\nSteps to reproduce the behavior:\r\nCreate Property mapping, add\r\n\r\nreturn {\r\n \"schemas\" : [\r\n \"urn:ietf:params:scim:schemas:core:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:extention:enterprise:2.0:User\"\r\n ],\r\n \"userName\": request.user.username,\r\n \"name\": {\r\n \"formatted\": request.user.name,\r\n \"givenName\": givenName,\r\n \"familyName\": familyName,\r\n },\r\n\r\nExpected behavior\r\nJSON containing schemas attribute \r\n\r\nInstead I get \r\nStopping sync due to error: Error 1 validation error for User schemas Tuple should have at most 1 item after validation, not 2 [type=too_long, input_value=['urn:ietf:params:scim:sc...on:enterprise:2.0:User'], input_type=list] For further information visit https://errors.pydantic.dev/2.4/v/too_long\r\n\r\nVersion and Deployment (please complete the following information):\r\n\r\nauthentik version: [2023.10.2]\r\nDeployment: [docker-compose]\n", "before_files": [{"content": "\"\"\"Custom SCIM schemas\"\"\"\n\nfrom pydanticscim.group import Group as BaseGroup\nfrom pydanticscim.responses import PatchRequest as BasePatchRequest\nfrom pydanticscim.responses import SCIMError as BaseSCIMError\nfrom pydanticscim.service_provider import Bulk, ChangePassword, Filter, Patch, Sort\nfrom pydanticscim.service_provider import (\n ServiceProviderConfiguration as BaseServiceProviderConfiguration,\n)\nfrom pydanticscim.user import User as BaseUser\n\n\nclass User(BaseUser):\n \"\"\"Modified User schema with added externalId field\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:User\",)\n externalId: str | None = None\n\n\nclass Group(BaseGroup):\n \"\"\"Modified Group schema with added externalId field\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:Group\",)\n externalId: str | None = None\n\n\nclass ServiceProviderConfiguration(BaseServiceProviderConfiguration):\n \"\"\"ServiceProviderConfig with fallback\"\"\"\n\n _is_fallback: bool | None = False\n\n @property\n def is_fallback(self) -> bool:\n \"\"\"Check if this service provider config was retrieved from the API endpoint\n or a fallback was used\"\"\"\n return self._is_fallback\n\n 
@staticmethod\n def default() -> \"ServiceProviderConfiguration\":\n \"\"\"Get default configuration, which doesn't support any optional features as fallback\"\"\"\n return ServiceProviderConfiguration(\n patch=Patch(supported=False),\n bulk=Bulk(supported=False),\n filter=Filter(supported=False),\n changePassword=ChangePassword(supported=False),\n sort=Sort(supported=False),\n authenticationSchemes=[],\n _is_fallback=True,\n )\n\n\nclass PatchRequest(BasePatchRequest):\n \"\"\"PatchRequest which correctly sets schemas\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:api:messages:2.0:PatchOp\",)\n\n\nclass SCIMError(BaseSCIMError):\n \"\"\"SCIM error with optional status code\"\"\"\n\n status: int | None\n", "path": "authentik/providers/scim/clients/schema.py"}]} | 1,489 | 244 |
gh_patches_debug_6569 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-bolts-230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CPCv2-Resnet18 pretrained not available
Hi,
When I try to load a pretrained CPCv2-resnet18 model, I get an `urllib.error.HTTPError: HTTP Error 403: Forbidden` error. The code I use to load the pretrained model is
```
model = CPCV2(encoder='resnet18', pretrained=True)
x = torch.rand(1, 3, 224, 224)
feats = model.encoder(x)
```
The following URL doesn't open in my browser (Chrome) either.
https://github.com/PyTorchLightning/pytorch-lightning-bolts/blob/32fb560a429532dfb40a5935ca7674990dae1f66/pl_bolts/utils/pretrained_weights.py#L6
Any help is much appreciated.
Thanks!
</issue>
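As a stop-gap while the hosted checkpoint returns 403, a sketch of loading a copy you download or host yourself; the import path and file name are assumptions, not taken from the issue:

```
import torch
from pl_bolts.models.self_supervised import CPCV2

# Any locally available copy of the CPCv2 resnet18 checkpoint.
ckpt_path = "checkpoints/cpcv2_resnet18_epoch85.ckpt"

model = CPCV2.load_from_checkpoint(ckpt_path)
feats = model.encoder(torch.rand(1, 3, 224, 224))
```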
<code>
[start of pl_bolts/utils/pretrained_weights.py]
1
2
3 vae_imagenet2012 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
4 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'
5
6 cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
7 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'
8 urls = {
9 'vae-imagenet2012': vae_imagenet2012,
10 'CPCV2-resnet18': cpcv2_resnet18
11 }
12
13
14 def load_pretrained(model, class_name=None): # pragma: no-cover
15 if class_name is None:
16 class_name = model.__class__.__name__
17 ckpt_url = urls[class_name]
18 weights_model = model.__class__.load_from_checkpoint(ckpt_url)
19 model.load_state_dict(weights_model.state_dict())
20
[end of pl_bolts/utils/pretrained_weights.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pl_bolts/utils/pretrained_weights.py b/pl_bolts/utils/pretrained_weights.py
--- a/pl_bolts/utils/pretrained_weights.py
+++ b/pl_bolts/utils/pretrained_weights.py
@@ -4,7 +4,7 @@
'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'
cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
- 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'
+ 'cpc/resnet18-v6/epoch%3D85.ckpt'
urls = {
'vae-imagenet2012': vae_imagenet2012,
'CPCV2-resnet18': cpcv2_resnet18
| {"golden_diff": "diff --git a/pl_bolts/utils/pretrained_weights.py b/pl_bolts/utils/pretrained_weights.py\n--- a/pl_bolts/utils/pretrained_weights.py\n+++ b/pl_bolts/utils/pretrained_weights.py\n@@ -4,7 +4,7 @@\n 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'\n \n cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n- 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'\n+ 'cpc/resnet18-v6/epoch%3D85.ckpt'\n urls = {\n 'vae-imagenet2012': vae_imagenet2012,\n 'CPCV2-resnet18': cpcv2_resnet18\n", "issue": "CPCv2-Resnet18 pretrained not available\nHi, \r\nWhen I try to load a pretrained CPCv2-resnet18 model, I get an `urllib.error.HTTPError: HTTP Error 403: Forbidden` error. The code I use to load the pretrained model is \r\n```\r\nmodel = CPCV2(encoder='resnet18', pretrained=True)\r\nx = torch.rand(1, 3, 224, 224)\r\nfeats = model.encoder(x)\r\n```\r\nThe following URL doesn't seem to open on my browser (Chrome) too. \r\nhttps://github.com/PyTorchLightning/pytorch-lightning-bolts/blob/32fb560a429532dfb40a5935ca7674990dae1f66/pl_bolts/utils/pretrained_weights.py#L6\r\n\r\nAny help is much appreciated.\r\nThanks! \r\n\n", "before_files": [{"content": "\n\nvae_imagenet2012 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'\n\ncpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'\nurls = {\n 'vae-imagenet2012': vae_imagenet2012,\n 'CPCV2-resnet18': cpcv2_resnet18\n}\n\n\ndef load_pretrained(model, class_name=None): # pragma: no-cover\n if class_name is None:\n class_name = model.__class__.__name__\n ckpt_url = urls[class_name]\n weights_model = model.__class__.load_from_checkpoint(ckpt_url)\n model.load_state_dict(weights_model.state_dict())\n", "path": "pl_bolts/utils/pretrained_weights.py"}]} | 1,007 | 206 |
gh_patches_debug_20599 | rasdani/github-patches | git_diff | wagtail__wagtail-1650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when serving images through the URL generator
I posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.
Hi guys, I think I'm having the same problem, but when serving images through the URL generator. It works when I'm logged in to the site (so the cache is not used) but fails when I'm logged out (cache fully on).
Cheers,
Jordi
Internal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/
Traceback (most recent call last):
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 204, in get_response
response = middleware_method(request, response)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py", line 121, in process_response
self.cache.set(cache_key, response, timeout)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py", line 239, in set
result = self._set(key, pickle.dumps(value), timeout, client, _add_only)
File "/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle instancemethod objects
Request repr():
<WSGIRequest
path:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,
GET:<QueryDict: {}>,
POST:<QueryDict: {}>,
COOKIES:{'_ga': 'GA1.3.1219121887.1434427204',
'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},
META:{'CONTENT_LENGTH': '',
'CONTENT_TYPE': '',
u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'DOCUMENT_ROOT': '/usr/share/nginx/html',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'HTTP_HOST': 'www.buildability.co.nz',
'HTTP_UPGRADE_INSECURE_REQUESTS': '1',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',
'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',
'QUERY_STRING': '',
'REMOTE_ADDR': '131.203.137.142',
'REMOTE_PORT': '51455',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',
u'SCRIPT_NAME': u'',
'SERVER_NAME': 'www.buildability.co.nz',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'UWSGI_SCHEME': 'http',
'uwsgi.core': 7,
'uwsgi.node': 'avinton',
'uwsgi.version': '1.9.17.1-debian',
'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,
'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,
'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,
'wsgi.multiprocess': True,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0)}>
</issue>
<code>
[start of wagtail/wagtailimages/views/frontend.py]
1 from wsgiref.util import FileWrapper
2 import imghdr
3
4 from django.shortcuts import get_object_or_404
5 from django.http import HttpResponse
6 from django.core.exceptions import PermissionDenied
7
8 from wagtail.wagtailimages.models import get_image_model
9 from wagtail.wagtailimages.utils import verify_signature
10 from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
11
12
13 def serve(request, signature, image_id, filter_spec):
14 image = get_object_or_404(get_image_model(), id=image_id)
15
16 if not verify_signature(signature.encode(), image_id, filter_spec):
17 raise PermissionDenied
18
19 try:
20 rendition = image.get_rendition(filter_spec)
21 rendition.file.open('rb')
22 image_format = imghdr.what(rendition.file)
23 return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
24 except InvalidFilterSpecError:
25 return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
26
[end of wagtail/wagtailimages/views/frontend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py
--- a/wagtail/wagtailimages/views/frontend.py
+++ b/wagtail/wagtailimages/views/frontend.py
@@ -2,7 +2,7 @@
import imghdr
from django.shortcuts import get_object_or_404
-from django.http import HttpResponse
+from django.http import HttpResponse, StreamingHttpResponse
from django.core.exceptions import PermissionDenied
from wagtail.wagtailimages.models import get_image_model
@@ -20,6 +20,6 @@
rendition = image.get_rendition(filter_spec)
rendition.file.open('rb')
image_format = imghdr.what(rendition.file)
- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
| {"golden_diff": "diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py\n--- a/wagtail/wagtailimages/views/frontend.py\n+++ b/wagtail/wagtailimages/views/frontend.py\n@@ -2,7 +2,7 @@\n import imghdr\n \n from django.shortcuts import get_object_or_404\n-from django.http import HttpResponse\n+from django.http import HttpResponse, StreamingHttpResponse\n from django.core.exceptions import PermissionDenied\n \n from wagtail.wagtailimages.models import get_image_model\n@@ -20,6 +20,6 @@\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "issue": "Error when serving images through the URL generator\nI posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.\n\nHi guys, I think I'm having the same problem but when serving images using the URL generator. It does work if I'm logged-in in the site (cache not working) but doesn't when I'm not (cache full on).\n\nCheers,\nJordi\n\nInternal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/\nTraceback (most recent call last):\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py\", line 204, in get_response\n response = middleware_method(request, response)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py\", line 121, in process_response\n self.cache.set(cache_key, response, timeout)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py\", line 239, in set\n result = self._set(key, pickle.dumps(value), timeout, client, _add_only)\n File \"/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py\", line 70, in _reduce_ex\n raise TypeError, \"can't pickle %s objects\" % base.__name__\nTypeError: can't pickle instancemethod objects\n\nRequest repr(): \n<WSGIRequest\npath:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,\nGET:<QueryDict: {}>,\nPOST:<QueryDict: {}>,\nCOOKIES:{'_ga': 'GA1.3.1219121887.1434427204',\n 'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},\nMETA:{'CONTENT_LENGTH': '',\n 'CONTENT_TYPE': '',\n u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'DOCUMENT_ROOT': '/usr/share/nginx/html',\n 'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',\n 'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',\n 'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',\n 'HTTP_CACHE_CONTROL': 'max-age=0',\n 'HTTP_CONNECTION': 'keep-alive',\n 'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'HTTP_HOST': 'www.buildability.co.nz',\n 'HTTP_UPGRADE_INSECURE_REQUESTS': '1',\n 'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',\n 'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '131.203.137.142',\n 'REMOTE_PORT': '51455',\n 'REQUEST_METHOD': 'GET',\n 'REQUEST_URI': 
'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',\n u'SCRIPT_NAME': u'',\n 'SERVER_NAME': 'www.buildability.co.nz',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'UWSGI_SCHEME': 'http',\n 'uwsgi.core': 7,\n 'uwsgi.node': 'avinton',\n 'uwsgi.version': '1.9.17.1-debian',\n 'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,\n 'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,\n 'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': True,\n 'wsgi.run_once': False,\n 'wsgi.url_scheme': 'http',\n 'wsgi.version': (1, 0)}>\n\n", "before_files": [{"content": "from wsgiref.util import FileWrapper\nimport imghdr\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import PermissionDenied\n\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtailimages.utils import verify_signature\nfrom wagtail.wagtailimages.exceptions import InvalidFilterSpecError\n\n\ndef serve(request, signature, image_id, filter_spec):\n image = get_object_or_404(get_image_model(), id=image_id)\n\n if not verify_signature(signature.encode(), image_id, filter_spec):\n raise PermissionDenied\n\n try:\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "path": "wagtail/wagtailimages/views/frontend.py"}]} | 1,897 | 233 |
gh_patches_debug_13497 | rasdani/github-patches | git_diff | ray-project__ray-8493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
import error
ray 0.8.5: when I try to use ray, it raises "Ray must be imported before pickle5 because Ray requires a specific version of pickle5 (which is packaged along with Ray)."
I want to confirm the required order: ray must be imported before pickle5, right?
</issue>
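For clarity, a tiny sketch of the ordering the error message asks for; installing pickle5 separately is usually unnecessary because ray bundles its own copy:

```
import ray  # must come first: ray puts its bundled pickle5 onto sys.path

try:
    import pickle5  # only safe after ray has been imported
except ImportError:
    pickle5 = None  # fine: ray's vendored copy is used internally
```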
<code>
[start of python/ray/__init__.py]
1 import os
2 import logging
3 from os.path import dirname
4 import sys
5
6 logger = logging.getLogger(__name__)
7
8 # MUST add pickle5 to the import path because it will be imported by some
9 # raylet modules.
10
11 if "pickle5" in sys.modules:
12 raise ImportError("Ray must be imported before pickle5 because Ray "
13 "requires a specific version of pickle5 (which is "
14 "packaged along with Ray).")
15
16 if "OMP_NUM_THREADS" not in os.environ:
17 logger.debug("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
18 "degradation with many workers (issue #6998). You can "
19 "override this by explicitly setting OMP_NUM_THREADS.")
20 os.environ["OMP_NUM_THREADS"] = "1"
21
22 # Add the directory containing pickle5 to the Python path so that we find the
23 # pickle5 version packaged with ray and not a pre-existing pickle5.
24 pickle5_path = os.path.join(
25 os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
26 sys.path.insert(0, pickle5_path)
27
28 # Importing psutil & setproctitle. Must be before ray._raylet is initialized.
29 thirdparty_files = os.path.join(
30 os.path.abspath(os.path.dirname(__file__)), "thirdparty_files")
31 sys.path.insert(0, thirdparty_files)
32
33 if sys.platform == "win32":
34 import ray.compat # noqa: E402
35 ray.compat.patch_redis_empty_recv()
36
37 # Expose ray ABI symbols which may be dependent by other shared
38 # libraries such as _streaming.so. See BUILD.bazel:_raylet
39 python_shared_lib_suffix = ".so" if sys.platform != "win32" else ".pyd"
40 so_path = os.path.join(dirname(__file__), "_raylet" + python_shared_lib_suffix)
41 if os.path.exists(so_path):
42 import ctypes
43 from ctypes import CDLL
44 CDLL(so_path, ctypes.RTLD_GLOBAL)
45
46 import ray._raylet # noqa: E402
47
48 from ray._raylet import (
49 ActorCheckpointID,
50 ActorClassID,
51 ActorID,
52 ClientID,
53 Config as _Config,
54 JobID,
55 WorkerID,
56 FunctionID,
57 ObjectID,
58 TaskID,
59 UniqueID,
60 Language,
61 ) # noqa: E402
62
63 _config = _Config()
64
65 from ray.profiling import profile # noqa: E402
66 from ray.state import (jobs, nodes, actors, objects, timeline,
67 object_transfer_timeline, cluster_resources,
68 available_resources, errors) # noqa: E402
69 from ray.worker import (
70 LOCAL_MODE,
71 SCRIPT_MODE,
72 WORKER_MODE,
73 cancel,
74 connect,
75 disconnect,
76 get,
77 get_gpu_ids,
78 get_resource_ids,
79 get_webui_url,
80 init,
81 is_initialized,
82 put,
83 kill,
84 register_custom_serializer,
85 remote,
86 shutdown,
87 show_in_webui,
88 wait,
89 ) # noqa: E402
90 import ray.internal # noqa: E402
91 import ray.projects # noqa: E402
92 # We import ray.actor because some code is run in actor.py which initializes
93 # some functions in the worker.
94 import ray.actor # noqa: F401
95 from ray.actor import method # noqa: E402
96 from ray.cross_language import java_function, java_actor_class # noqa: E402
97 from ray import util # noqa: E402
98
99 # Replaced with the current commit when building the wheels.
100 __commit__ = "{{RAY_COMMIT_SHA}}"
101 __version__ = "0.9.0.dev0"
102
103 __all__ = [
104 "jobs",
105 "nodes",
106 "actors",
107 "objects",
108 "timeline",
109 "object_transfer_timeline",
110 "cluster_resources",
111 "available_resources",
112 "errors",
113 "LOCAL_MODE",
114 "PYTHON_MODE",
115 "SCRIPT_MODE",
116 "WORKER_MODE",
117 "__version__",
118 "_config",
119 "_get_runtime_context",
120 "actor",
121 "cancel",
122 "connect",
123 "disconnect",
124 "get",
125 "get_gpu_ids",
126 "get_resource_ids",
127 "get_webui_url",
128 "init",
129 "internal",
130 "is_initialized",
131 "method",
132 "profile",
133 "projects",
134 "put",
135 "kill",
136 "register_custom_serializer",
137 "remote",
138 "shutdown",
139 "show_in_webui",
140 "wait",
141 "Language",
142 "java_function",
143 "java_actor_class",
144 "util",
145 ]
146
147 # ID types
148 __all__ += [
149 "ActorCheckpointID",
150 "ActorClassID",
151 "ActorID",
152 "ClientID",
153 "JobID",
154 "WorkerID",
155 "FunctionID",
156 "ObjectID",
157 "TaskID",
158 "UniqueID",
159 ]
160
[end of python/ray/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -9,9 +9,13 @@
# raylet modules.
if "pickle5" in sys.modules:
- raise ImportError("Ray must be imported before pickle5 because Ray "
- "requires a specific version of pickle5 (which is "
- "packaged along with Ray).")
+ import pkg_resources
+ version_info = pkg_resources.require("pickle5")
+ version = tuple(int(n) for n in version_info[0].version.split("."))
+ if version < (0, 0, 10):
+ raise ImportError("You are using an old version of pickle5 that "
+ "leaks memory, please run 'pip install pickle5 -U' "
+ "to upgrade")
if "OMP_NUM_THREADS" not in os.environ:
logger.debug("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
| {"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -9,9 +9,13 @@\n # raylet modules.\n \n if \"pickle5\" in sys.modules:\n- raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n- \"requires a specific version of pickle5 (which is \"\n- \"packaged along with Ray).\")\n+ import pkg_resources\n+ version_info = pkg_resources.require(\"pickle5\")\n+ version = tuple(int(n) for n in version_info[0].version.split(\".\"))\n+ if version < (0, 0, 10):\n+ raise ImportError(\"You are using an old version of pickle5 that \"\n+ \"leaks memory, please run 'pip install pickle5 -U' \"\n+ \"to upgrade\")\n \n if \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n", "issue": "import error\nray 0.8.5,when I try to use ray\uff0c it occurs Ray must be imported before pickle5 because Ray requires a specific version of pickle5 (which is packaged along with Ray.\r\n\r\nI want to know it must import pickle5 before import ray, Right?\n", "before_files": [{"content": "import os\nimport logging\nfrom os.path import dirname\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n# MUST add pickle5 to the import path because it will be imported by some\n# raylet modules.\n\nif \"pickle5\" in sys.modules:\n raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n \"requires a specific version of pickle5 (which is \"\n \"packaged along with Ray).\")\n\nif \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). You can \"\n \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Add the directory containing pickle5 to the Python path so that we find the\n# pickle5 version packaged with ray and not a pre-existing pickle5.\npickle5_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pickle5_files\")\nsys.path.insert(0, pickle5_path)\n\n# Importing psutil & setproctitle. Must be before ray._raylet is initialized.\nthirdparty_files = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"thirdparty_files\")\nsys.path.insert(0, thirdparty_files)\n\nif sys.platform == \"win32\":\n import ray.compat # noqa: E402\n ray.compat.patch_redis_empty_recv()\n\n# Expose ray ABI symbols which may be dependent by other shared\n# libraries such as _streaming.so. 
See BUILD.bazel:_raylet\npython_shared_lib_suffix = \".so\" if sys.platform != \"win32\" else \".pyd\"\nso_path = os.path.join(dirname(__file__), \"_raylet\" + python_shared_lib_suffix)\nif os.path.exists(so_path):\n import ctypes\n from ctypes import CDLL\n CDLL(so_path, ctypes.RTLD_GLOBAL)\n\nimport ray._raylet # noqa: E402\n\nfrom ray._raylet import (\n ActorCheckpointID,\n ActorClassID,\n ActorID,\n ClientID,\n Config as _Config,\n JobID,\n WorkerID,\n FunctionID,\n ObjectID,\n TaskID,\n UniqueID,\n Language,\n) # noqa: E402\n\n_config = _Config()\n\nfrom ray.profiling import profile # noqa: E402\nfrom ray.state import (jobs, nodes, actors, objects, timeline,\n object_transfer_timeline, cluster_resources,\n available_resources, errors) # noqa: E402\nfrom ray.worker import (\n LOCAL_MODE,\n SCRIPT_MODE,\n WORKER_MODE,\n cancel,\n connect,\n disconnect,\n get,\n get_gpu_ids,\n get_resource_ids,\n get_webui_url,\n init,\n is_initialized,\n put,\n kill,\n register_custom_serializer,\n remote,\n shutdown,\n show_in_webui,\n wait,\n) # noqa: E402\nimport ray.internal # noqa: E402\nimport ray.projects # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\nfrom ray.cross_language import java_function, java_actor_class # noqa: E402\nfrom ray import util # noqa: E402\n\n# Replaced with the current commit when building the wheels.\n__commit__ = \"{{RAY_COMMIT_SHA}}\"\n__version__ = \"0.9.0.dev0\"\n\n__all__ = [\n \"jobs\",\n \"nodes\",\n \"actors\",\n \"objects\",\n \"timeline\",\n \"object_transfer_timeline\",\n \"cluster_resources\",\n \"available_resources\",\n \"errors\",\n \"LOCAL_MODE\",\n \"PYTHON_MODE\",\n \"SCRIPT_MODE\",\n \"WORKER_MODE\",\n \"__version__\",\n \"_config\",\n \"_get_runtime_context\",\n \"actor\",\n \"cancel\",\n \"connect\",\n \"disconnect\",\n \"get\",\n \"get_gpu_ids\",\n \"get_resource_ids\",\n \"get_webui_url\",\n \"init\",\n \"internal\",\n \"is_initialized\",\n \"method\",\n \"profile\",\n \"projects\",\n \"put\",\n \"kill\",\n \"register_custom_serializer\",\n \"remote\",\n \"shutdown\",\n \"show_in_webui\",\n \"wait\",\n \"Language\",\n \"java_function\",\n \"java_actor_class\",\n \"util\",\n]\n\n# ID types\n__all__ += [\n \"ActorCheckpointID\",\n \"ActorClassID\",\n \"ActorID\",\n \"ClientID\",\n \"JobID\",\n \"WorkerID\",\n \"FunctionID\",\n \"ObjectID\",\n \"TaskID\",\n \"UniqueID\",\n]\n", "path": "python/ray/__init__.py"}]} | 2,033 | 229 |
gh_patches_debug_16437 | rasdani/github-patches | git_diff | google__pytype-773 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automatically add a .gitignore file to the output subdirectory
This PR automatically creates a .gitignore file in the output directory used by pytype/tools/analyze_project/main.py.
Issue #759
</issue>
<code>
[start of pytype/tools/analyze_project/main.py]
1 """Analyze an entire project using pytype."""
2
3 import logging
4 import sys
5 import tempfile
6
7 import importlab.environment
8 import importlab.fs
9 import importlab.graph
10 import importlab.output
11
12 from pytype import io
13 from pytype.tools import environment
14 from pytype.tools import tool_utils
15 from pytype.tools.analyze_project import config
16 from pytype.tools.analyze_project import environment as analyze_project_env
17 from pytype.tools.analyze_project import parse_args
18 from pytype.tools.analyze_project import pytype_runner
19
20
21 def main():
22 parser = parse_args.make_parser()
23 args = parser.parse_args(sys.argv[1:])
24
25 if args.version:
26 print(io.get_pytype_version())
27 sys.exit(0)
28
29 tool_utils.setup_logging_or_die(args.verbosity)
30
31 if args.generate_config:
32 config.generate_sample_config_or_die(args.generate_config,
33 parser.pytype_single_args)
34 sys.exit(0)
35
36 conf = parser.config_from_defaults()
37 # File options overwrite defaults.
38 file_config = config.read_config_file_or_die(args.config)
39 parser.postprocess(file_config, from_strings=True)
40 conf.populate_from(file_config)
41 # Command line arguments overwrite file options.
42 conf.populate_from(args)
43 conf.inputs -= conf.exclude
44 if args.no_cache:
45 conf.output = tempfile.mkdtemp()
46 if not conf.pythonpath:
47 conf.pythonpath = environment.compute_pythonpath(conf.inputs)
48 logging.info('\n '.join(['Configuration:'] + str(conf).split('\n')))
49
50 if not conf.inputs:
51 parser.parser.error('Need an input.')
52
53 # Importlab needs the python exe, so we check it as early as possible.
54 environment.check_python_exe_or_die(conf.python_version)
55
56 typeshed = environment.initialize_typeshed_or_die()
57 env = analyze_project_env.create_importlab_environment(conf, typeshed)
58 print('Computing dependencies')
59 import_graph = importlab.graph.ImportGraph.create(env, conf.inputs, trim=True)
60
61 if args.tree:
62 print('Source tree:')
63 importlab.output.print_tree(import_graph)
64 sys.exit(0)
65
66 if args.unresolved:
67 print('Unresolved dependencies:')
68 for imp in sorted(import_graph.get_all_unresolved()):
69 print(' ', imp.name)
70 sys.exit(0)
71
72 # Main usage mode: analyze the project file by file in dependency order.
73
74 logging.info('Source tree:\n%s',
75 importlab.output.formatted_deps_list(import_graph))
76 tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')
77 deps = pytype_runner.deps_from_import_graph(import_graph)
78 runner = pytype_runner.PytypeRunner(conf, deps)
79 return runner.run()
80
81
82 if __name__ == '__main__':
83 sys.exit(main())
84
[end of pytype/tools/analyze_project/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytype/tools/analyze_project/main.py b/pytype/tools/analyze_project/main.py
--- a/pytype/tools/analyze_project/main.py
+++ b/pytype/tools/analyze_project/main.py
@@ -1,6 +1,7 @@
"""Analyze an entire project using pytype."""
import logging
+import os
import sys
import tempfile
@@ -74,6 +75,8 @@
logging.info('Source tree:\n%s',
importlab.output.formatted_deps_list(import_graph))
tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')
+ with open(os.path.join(conf.output, '.gitignore'), 'w') as f:
+ f.write('# Automatically created by pytype\n*')
deps = pytype_runner.deps_from_import_graph(import_graph)
runner = pytype_runner.PytypeRunner(conf, deps)
return runner.run()
| {"golden_diff": "diff --git a/pytype/tools/analyze_project/main.py b/pytype/tools/analyze_project/main.py\n--- a/pytype/tools/analyze_project/main.py\n+++ b/pytype/tools/analyze_project/main.py\n@@ -1,6 +1,7 @@\n \"\"\"Analyze an entire project using pytype.\"\"\"\n \n import logging\n+import os\n import sys\n import tempfile\n \n@@ -74,6 +75,8 @@\n logging.info('Source tree:\\n%s',\n importlab.output.formatted_deps_list(import_graph))\n tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')\n+ with open(os.path.join(conf.output, '.gitignore'), 'w') as f:\n+ f.write('# Automatically created by pytype\\n*')\n deps = pytype_runner.deps_from_import_graph(import_graph)\n runner = pytype_runner.PytypeRunner(conf, deps)\n return runner.run()\n", "issue": "Automatically added git ignore file to sub directory\nThis PR automatically creates a .gitignore file to the output directory of pytype/tools/analyze_project/main.py\r\n\r\nIssue #759 \n", "before_files": [{"content": "\"\"\"Analyze an entire project using pytype.\"\"\"\n\nimport logging\nimport sys\nimport tempfile\n\nimport importlab.environment\nimport importlab.fs\nimport importlab.graph\nimport importlab.output\n\nfrom pytype import io\nfrom pytype.tools import environment\nfrom pytype.tools import tool_utils\nfrom pytype.tools.analyze_project import config\nfrom pytype.tools.analyze_project import environment as analyze_project_env\nfrom pytype.tools.analyze_project import parse_args\nfrom pytype.tools.analyze_project import pytype_runner\n\n\ndef main():\n parser = parse_args.make_parser()\n args = parser.parse_args(sys.argv[1:])\n\n if args.version:\n print(io.get_pytype_version())\n sys.exit(0)\n\n tool_utils.setup_logging_or_die(args.verbosity)\n\n if args.generate_config:\n config.generate_sample_config_or_die(args.generate_config,\n parser.pytype_single_args)\n sys.exit(0)\n\n conf = parser.config_from_defaults()\n # File options overwrite defaults.\n file_config = config.read_config_file_or_die(args.config)\n parser.postprocess(file_config, from_strings=True)\n conf.populate_from(file_config)\n # Command line arguments overwrite file options.\n conf.populate_from(args)\n conf.inputs -= conf.exclude\n if args.no_cache:\n conf.output = tempfile.mkdtemp()\n if not conf.pythonpath:\n conf.pythonpath = environment.compute_pythonpath(conf.inputs)\n logging.info('\\n '.join(['Configuration:'] + str(conf).split('\\n')))\n\n if not conf.inputs:\n parser.parser.error('Need an input.')\n\n # Importlab needs the python exe, so we check it as early as possible.\n environment.check_python_exe_or_die(conf.python_version)\n\n typeshed = environment.initialize_typeshed_or_die()\n env = analyze_project_env.create_importlab_environment(conf, typeshed)\n print('Computing dependencies')\n import_graph = importlab.graph.ImportGraph.create(env, conf.inputs, trim=True)\n\n if args.tree:\n print('Source tree:')\n importlab.output.print_tree(import_graph)\n sys.exit(0)\n\n if args.unresolved:\n print('Unresolved dependencies:')\n for imp in sorted(import_graph.get_all_unresolved()):\n print(' ', imp.name)\n sys.exit(0)\n\n # Main usage mode: analyze the project file by file in dependency order.\n\n logging.info('Source tree:\\n%s',\n importlab.output.formatted_deps_list(import_graph))\n tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')\n deps = pytype_runner.deps_from_import_graph(import_graph)\n runner = pytype_runner.PytypeRunner(conf, deps)\n return runner.run()\n\n\nif __name__ == '__main__':\n 
sys.exit(main())\n", "path": "pytype/tools/analyze_project/main.py"}]} | 1,327 | 194 |
gh_patches_debug_31834 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-881 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix segmentation example
# Before submitting
- [x] Was this discussed/approved via a Github issue? (no need for typos, doc improvements)
- [x] Did you read the [contributor guideline](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/.github/CONTRIBUTING.md)?
- [x] Did you make sure to update the docs?
- [ ] Did you write any new necessary tests?
## What does this PR do?
Fixes #874 (issue).
## PR review
Anyone in the community is free to review the PR once the tests have passed.
## Some comments
I have added a custom model (UNet) in place of the previous torchvision segmentation model, which was causing tests to fail (mostly because those torchvision models were only introduced in torchvision 0.3, so they fail on earlier versions).
</issue>
<code>
[start of pl_examples/full_examples/semantic_segmentation/models/unet/model.py]
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 from parts import DoubleConv, Down, Up
6
7
8 class UNet(nn.Module):
9 '''
10 Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation
11 Link - https://arxiv.org/abs/1505.04597
12 '''
13 def __init__(self, num_classes=19, bilinear=False):
14 super().__init__()
15 self.bilinear = bilinear
16 self.num_classes = num_classes
17 self.layer1 = DoubleConv(3, 64)
18 self.layer2 = Down(64, 128)
19 self.layer3 = Down(128, 256)
20 self.layer4 = Down(256, 512)
21 self.layer5 = Down(512, 1024)
22
23 self.layer6 = Up(1024, 512, bilinear=self.bilinear)
24 self.layer7 = Up(512, 256, bilinear=self.bilinear)
25 self.layer8 = Up(256, 128, bilinear=self.bilinear)
26 self.layer9 = Up(128, 64, bilinear=self.bilinear)
27
28 self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)
29
30 def forward(self, x):
31 x1 = self.layer1(x)
32 x2 = self.layer2(x1)
33 x3 = self.layer3(x2)
34 x4 = self.layer4(x3)
35 x5 = self.layer5(x4)
36
37 x6 = self.layer6(x5, x4)
38 x6 = self.layer7(x6, x3)
39 x6 = self.layer8(x6, x2)
40 x6 = self.layer9(x6, x1)
41
42 return self.layer10(x6)
43
[end of pl_examples/full_examples/semantic_segmentation/models/unet/model.py]
[start of pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py]
1 # For relative imports to work in Python 3.6
2 import os
3 import sys
4 sys.path.append(os.path.dirname(os.path.realpath(__file__)))
5
[end of pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py b/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py
deleted file mode 100644
--- a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# For relative imports to work in Python 3.6
-import os
-import sys
-sys.path.append(os.path.dirname(os.path.realpath(__file__)))
diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
--- a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
+++ b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
@@ -2,30 +2,33 @@
import torch.nn as nn
import torch.nn.functional as F
-from parts import DoubleConv, Down, Up
+from models.unet.parts import DoubleConv, Down, Up
class UNet(nn.Module):
'''
Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation
Link - https://arxiv.org/abs/1505.04597
+
+ Parameters:
+ num_classes (int) - Number of output classes required (default 19 for KITTI dataset)
+ bilinear (bool) - Whether to use bilinear interpolation or transposed
+ convolutions for upsampling.
'''
def __init__(self, num_classes=19, bilinear=False):
super().__init__()
- self.bilinear = bilinear
- self.num_classes = num_classes
self.layer1 = DoubleConv(3, 64)
self.layer2 = Down(64, 128)
self.layer3 = Down(128, 256)
self.layer4 = Down(256, 512)
self.layer5 = Down(512, 1024)
- self.layer6 = Up(1024, 512, bilinear=self.bilinear)
- self.layer7 = Up(512, 256, bilinear=self.bilinear)
- self.layer8 = Up(256, 128, bilinear=self.bilinear)
- self.layer9 = Up(128, 64, bilinear=self.bilinear)
+ self.layer6 = Up(1024, 512, bilinear=bilinear)
+ self.layer7 = Up(512, 256, bilinear=bilinear)
+ self.layer8 = Up(256, 128, bilinear=bilinear)
+ self.layer9 = Up(128, 64, bilinear=bilinear)
- self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)
+ self.layer10 = nn.Conv2d(64, num_classes, kernel_size=1)
def forward(self, x):
x1 = self.layer1(x)
| {"golden_diff": "diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py b/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py\ndeleted file mode 100644\n--- a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-# For relative imports to work in Python 3.6\n-import os\n-import sys\n-sys.path.append(os.path.dirname(os.path.realpath(__file__)))\ndiff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n--- a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n+++ b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n@@ -2,30 +2,33 @@\n import torch.nn as nn\n import torch.nn.functional as F\n \n-from parts import DoubleConv, Down, Up\n+from models.unet.parts import DoubleConv, Down, Up\n \n \n class UNet(nn.Module):\n '''\n Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation\n Link - https://arxiv.org/abs/1505.04597\n+\n+ Parameters:\n+ num_classes (int) - Number of output classes required (default 19 for KITTI dataset)\n+ bilinear (bool) - Whether to use bilinear interpolation or transposed\n+ convolutions for upsampling.\n '''\n def __init__(self, num_classes=19, bilinear=False):\n super().__init__()\n- self.bilinear = bilinear\n- self.num_classes = num_classes\n self.layer1 = DoubleConv(3, 64)\n self.layer2 = Down(64, 128)\n self.layer3 = Down(128, 256)\n self.layer4 = Down(256, 512)\n self.layer5 = Down(512, 1024)\n \n- self.layer6 = Up(1024, 512, bilinear=self.bilinear)\n- self.layer7 = Up(512, 256, bilinear=self.bilinear)\n- self.layer8 = Up(256, 128, bilinear=self.bilinear)\n- self.layer9 = Up(128, 64, bilinear=self.bilinear)\n+ self.layer6 = Up(1024, 512, bilinear=bilinear)\n+ self.layer7 = Up(512, 256, bilinear=bilinear)\n+ self.layer8 = Up(256, 128, bilinear=bilinear)\n+ self.layer9 = Up(128, 64, bilinear=bilinear)\n \n- self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)\n+ self.layer10 = nn.Conv2d(64, num_classes, kernel_size=1)\n \n def forward(self, x):\n x1 = self.layer1(x)\n", "issue": "Fix segmentation example\n# Before submitting\r\n\r\n- [x] Was this discussed/approved via a Github issue? (no need for typos, doc improvements)\r\n- [x] Did you read the [contributor guideline](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/.github/CONTRIBUTING.md)?\r\n- [x] Did you make sure to update the docs? \r\n- [ ] Did you write any new necessary tests? \r\n\r\n## What does this PR do?\r\nFixes #874 (issue).\r\n\r\n## PR review \r\nAnyone in the community is free to review the PR once the tests have passed. 
\r\n\r\n## Some comments\r\nI have added a custom model (UNet) instead of the previous torchvision segmentation model as it was causing tests to fail (mostly because those torchvision models were introduced in version 0.3 and it will fail for previous versions).\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom parts import DoubleConv, Down, Up\n\n\nclass UNet(nn.Module):\n '''\n Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation\n Link - https://arxiv.org/abs/1505.04597\n '''\n def __init__(self, num_classes=19, bilinear=False):\n super().__init__()\n self.bilinear = bilinear\n self.num_classes = num_classes\n self.layer1 = DoubleConv(3, 64)\n self.layer2 = Down(64, 128)\n self.layer3 = Down(128, 256)\n self.layer4 = Down(256, 512)\n self.layer5 = Down(512, 1024)\n\n self.layer6 = Up(1024, 512, bilinear=self.bilinear)\n self.layer7 = Up(512, 256, bilinear=self.bilinear)\n self.layer8 = Up(256, 128, bilinear=self.bilinear)\n self.layer9 = Up(128, 64, bilinear=self.bilinear)\n\n self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)\n\n def forward(self, x):\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n x5 = self.layer5(x4)\n\n x6 = self.layer6(x5, x4)\n x6 = self.layer7(x6, x3)\n x6 = self.layer8(x6, x2)\n x6 = self.layer9(x6, x1)\n\n return self.layer10(x6)\n", "path": "pl_examples/full_examples/semantic_segmentation/models/unet/model.py"}, {"content": "# For relative imports to work in Python 3.6\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\n", "path": "pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py"}]} | 1,318 | 699 |
gh_patches_debug_35203 | rasdani/github-patches | git_diff | acl-org__acl-anthology-255 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How to annotate first initials in XML
@mjpost and I have been discussing offline: When an author's name is written using a first initial on the paper itself, but the complete first name is known (at least probably), how should it be written in the XML? Some ideas (others are possible):
```
a) <author><first>Matt</first><last>Post</last></author>
b) <author><first>M[att]</first><last>Post</last></author>
c) <author><first complete="Matt">M.</first><last>Post</last></author>
d) <author><first initials="M.">Matt</first><last>Post</last></author>
e) <author><first>M<completion>att</completion></first><last>Post</last></author>
```
We have a big influx of these, thanks to the hard work of the LREC organizers supplying complete first names, so if the answer is anything other than (a), it would be nice to decide before merging in the new ones.
JATS uses (d) but without the period (https://jats.nlm.nih.gov/archiving/tag-library/1.1d1/n-zu82.html).
Some special cases to consider:
- Initials are not always initials: `Wm.` to `William`, `R.` to `Bob`
- Middle initials, double first names, second last names, etc. can be expanded from initials as well.
- ADDED: Possibly related, possibly a can of worms: There are some (but not many) papers where the author's name is written _only_ in a non-Latin script (e.g., https://www.aclweb.org/anthology/O00-1008), so the editor has filled in a transliteration.
</issue>
<code>
[start of bin/check_name_variants.py]
1 import yaml
2 import sys
3 import lxml.etree as etree
4
5 person_fields = {'canonical', 'variants', 'comment'}
6 name_fields = {'first', 'last', 'papers'}
7
8 def text(node):
9 """Extract text from an XML node."""
10 if node is None: return ''
11 s = ''.join(node.itertext())
12 return ' '.join(s.split())
13
14 def name(d):
15 return (d['first'], d['last'])
16
17 if len(sys.argv) > 2:
18 names = set()
19 for xmlfilename in sys.argv[2:]:
20 try:
21 tree = etree.parse(xmlfilename)
22 except:
23 print(xmlfilename)
24 raise
25 for paper in tree.getroot().findall('paper'):
26 for person in paper.xpath('./author|./editor'):
27 first = text(person.find('first'))
28 last = text(person.find('last'))
29 names.add((first,last))
30 else:
31 names = None
32
33 doc = yaml.load(open(sys.argv[1]))
34
35 assert isinstance(doc, list)
36 for person in doc:
37 assert isinstance(person, dict), person
38 assert set(person.keys()).issubset(person_fields), person
39 assert 'canonical' in person, person
40 assert isinstance(person['canonical'], dict), person
41 assert set(person['canonical']).issubset(name_fields), person
42 if names is not None and name(person['canonical']) not in names:
43 print('unused name', person['canonical'])
44 dupes = {name(person['canonical'])}
45 assert 'variants' in person, person
46 assert isinstance(person['variants'], list), person
47 for variant in person['variants']:
48 assert set(variant).issubset(name_fields), person
49 if names is not None and name(variant) not in names:
50 print('unused name', variant)
51 assert name(variant) not in dupes, variant
52 dupes.add(name(variant))
53
54
[end of bin/check_name_variants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/check_name_variants.py b/bin/check_name_variants.py
--- a/bin/check_name_variants.py
+++ b/bin/check_name_variants.py
@@ -1,14 +1,19 @@
import yaml
import sys
import lxml.etree as etree
+import logging
person_fields = {'canonical', 'variants', 'comment'}
name_fields = {'first', 'last', 'papers'}
def text(node):
"""Extract text from an XML node."""
- if node is None: return ''
- s = ''.join(node.itertext())
+ if node is None:
+ return ''
+ if 'complete' in node.attrib:
+ s = node.attrib['complete']
+ else:
+ s = ''.join(node.itertext())
return ' '.join(s.split())
def name(d):
@@ -20,7 +25,7 @@
try:
tree = etree.parse(xmlfilename)
except:
- print(xmlfilename)
+ logging.error("couldn't parse {}".format(xmlfilename))
raise
for paper in tree.getroot().findall('paper'):
for person in paper.xpath('./author|./editor'):
@@ -40,14 +45,15 @@
assert isinstance(person['canonical'], dict), person
assert set(person['canonical']).issubset(name_fields), person
if names is not None and name(person['canonical']) not in names:
- print('unused name', person['canonical'])
+ logging.warning('unused name: {}'.format(person['canonical']))
dupes = {name(person['canonical'])}
assert 'variants' in person, person
assert isinstance(person['variants'], list), person
for variant in person['variants']:
assert set(variant).issubset(name_fields), person
if names is not None and name(variant) not in names:
- print('unused name', variant)
+ logging.warning('unused name: {}'.format(variant))
assert name(variant) not in dupes, variant
dupes.add(name(variant))
+print(yaml.dump(doc, allow_unicode=True))
| {"golden_diff": "diff --git a/bin/check_name_variants.py b/bin/check_name_variants.py\n--- a/bin/check_name_variants.py\n+++ b/bin/check_name_variants.py\n@@ -1,14 +1,19 @@\n import yaml\n import sys\n import lxml.etree as etree\n+import logging\n \n person_fields = {'canonical', 'variants', 'comment'}\n name_fields = {'first', 'last', 'papers'}\n \n def text(node):\n \"\"\"Extract text from an XML node.\"\"\"\n- if node is None: return ''\n- s = ''.join(node.itertext())\n+ if node is None:\n+ return ''\n+ if 'complete' in node.attrib:\n+ s = node.attrib['complete']\n+ else:\n+ s = ''.join(node.itertext())\n return ' '.join(s.split())\n \n def name(d):\n@@ -20,7 +25,7 @@\n try:\n tree = etree.parse(xmlfilename)\n except:\n- print(xmlfilename)\n+ logging.error(\"couldn't parse {}\".format(xmlfilename))\n raise\n for paper in tree.getroot().findall('paper'):\n for person in paper.xpath('./author|./editor'):\n@@ -40,14 +45,15 @@\n assert isinstance(person['canonical'], dict), person\n assert set(person['canonical']).issubset(name_fields), person\n if names is not None and name(person['canonical']) not in names:\n- print('unused name', person['canonical'])\n+ logging.warning('unused name: {}'.format(person['canonical']))\n dupes = {name(person['canonical'])}\n assert 'variants' in person, person\n assert isinstance(person['variants'], list), person\n for variant in person['variants']:\n assert set(variant).issubset(name_fields), person\n if names is not None and name(variant) not in names:\n- print('unused name', variant)\n+ logging.warning('unused name: {}'.format(variant))\n assert name(variant) not in dupes, variant\n dupes.add(name(variant))\n \n+print(yaml.dump(doc, allow_unicode=True))\n", "issue": "How to annotate first initials in XML\n@mjpost and I have been discussing offline: When an author's name is written using a first initial on the paper itself, but the complete first name is known (at least probably), how should it be written in the XML? Some ideas (others are possible):\r\n\r\n```\r\na) <author><first>Matt</first><last>Post</last></author>\r\nb) <author><first>M[att]</first><last>Post</last></author>\r\nc) <author><first complete=\"Matt\">M.</first><last>Post</last></author>\r\nd) <author><first initials=\"M.\">Matt</first><last>Post</last></author>\r\ne) <author><first>M<completion>att</completion></first><last>Post</last></author>\r\n```\r\n\r\nWe have a big influx of these, thanks to the hard work of the LREC organizers supplying complete first names, so if the answer is anything other than (a), it would be nice to decide before merging in the new ones.\r\n\r\nJATS uses (d) but without the period (https://jats.nlm.nih.gov/archiving/tag-library/1.1d1/n-zu82.html).\r\n\r\nSome special cases to consider:\r\n\r\n- Initials are not always initials: `Wm.` to `William`, `R.` to `Bob`\r\n- Middle initials, double first names, second last names, etc. 
can be expanded from initials as well.\r\n- ADDED: Possibly related, possibly a can of worms: There are some (but not many) papers where the author's name is written _only_ in a non-Latin script (e.g., https://www.aclweb.org/anthology/O00-1008), so the editor has filled in a transliteration.\n", "before_files": [{"content": "import yaml\nimport sys\nimport lxml.etree as etree\n\nperson_fields = {'canonical', 'variants', 'comment'}\nname_fields = {'first', 'last', 'papers'}\n\ndef text(node):\n \"\"\"Extract text from an XML node.\"\"\"\n if node is None: return ''\n s = ''.join(node.itertext())\n return ' '.join(s.split())\n\ndef name(d):\n return (d['first'], d['last'])\n\nif len(sys.argv) > 2:\n names = set()\n for xmlfilename in sys.argv[2:]:\n try:\n tree = etree.parse(xmlfilename)\n except:\n print(xmlfilename)\n raise\n for paper in tree.getroot().findall('paper'):\n for person in paper.xpath('./author|./editor'):\n first = text(person.find('first'))\n last = text(person.find('last'))\n names.add((first,last))\nelse:\n names = None\n\ndoc = yaml.load(open(sys.argv[1]))\n\nassert isinstance(doc, list)\nfor person in doc:\n assert isinstance(person, dict), person\n assert set(person.keys()).issubset(person_fields), person\n assert 'canonical' in person, person\n assert isinstance(person['canonical'], dict), person\n assert set(person['canonical']).issubset(name_fields), person\n if names is not None and name(person['canonical']) not in names:\n print('unused name', person['canonical'])\n dupes = {name(person['canonical'])}\n assert 'variants' in person, person\n assert isinstance(person['variants'], list), person\n for variant in person['variants']:\n assert set(variant).issubset(name_fields), person\n if names is not None and name(variant) not in names:\n print('unused name', variant)\n assert name(variant) not in dupes, variant\n dupes.add(name(variant))\n \n", "path": "bin/check_name_variants.py"}]} | 1,420 | 458 |
gh_patches_debug_10203 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-746 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set up integration testing environment
## Problem
We need to set up an integration test environment for Mathesar, to test all user action scenarios.
## Proposed solution
Based on the discussions we've had previously (#89), the most suitable choice for us is [Playwright](https://playwright.dev/).
Integration tests require a complete Mathesar setup, including a database. We should be able to test `user action -> api calls -> changes in db`.
## Additional context
- #89
</issue>
<code>
[start of install.py]
1 """
2 This script installs functions and types for Mathesar onto the configured DB.
3 """
4 from config.settings import DATABASES
5 from db import install
6 import sys
7 import getopt
8
9
10 def main():
11 skip_confirm = False
12 (opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"])
13 for (opt, value) in opts:
14 if (opt == "-s") or (opt == "--skip-confirm"):
15 skip_confirm = True
16 for database_key in [key for key in DATABASES if key != "default"]:
17 install_on_db_with_key(database_key, skip_confirm)
18
19
20 def install_on_db_with_key(database_key, skip_confirm):
21 if DATABASES[database_key]["HOST"] == "mathesar_db":
22 # if we're going to install on the docker-created Postgres, we'll
23 # create the DB
24 print("Creating Mathesar DB on docker-created PostgreSQL instance")
25 install.create_mathesar_database(
26 DATABASES[database_key]["NAME"],
27 DATABASES["default"]["USER"],
28 DATABASES["default"]["PASSWORD"],
29 DATABASES["default"]["HOST"],
30 DATABASES["default"]["NAME"],
31 DATABASES["default"]["PORT"],
32 )
33 print(f"Created DB is {DATABASES['mathesar_tables']['NAME']}")
34 else:
35 # if we're installing anywhere else, we require the DB to exist in
36 # advance.
37 username = DATABASES[database_key]["USER"]
38 password = DATABASES[database_key]["PASSWORD"]
39 host = DATABASES[database_key]["HOST"]
40 db_name = DATABASES[database_key]["NAME"]
41 port = DATABASES[database_key]["PORT"]
42 print("Installing Mathesar DB on preexisting PostgreSQL instance...")
43 if skip_confirm is False:
44 confirmation = input(
45 f"Mathesar will be installed on DB {db_name} at host {host}."
46 "Confirm? (y/n) > "
47 )
48 if (confirmation.lower() in ["y", "yes"]) or (skip_confirm is True):
49 print("Installing...")
50 install.install_mathesar_on_preexisting_database(
51 username,
52 password,
53 host,
54 db_name,
55 port,
56 )
57 else:
58 print("Skipping DB with key {database_key}.")
59
60
61 if __name__ == "__main__":
62 main()
63
[end of install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install.py b/install.py
--- a/install.py
+++ b/install.py
@@ -39,7 +39,7 @@
host = DATABASES[database_key]["HOST"]
db_name = DATABASES[database_key]["NAME"]
port = DATABASES[database_key]["PORT"]
- print("Installing Mathesar DB on preexisting PostgreSQL instance...")
+ print(f"Installing Mathesar DB {db_name} on preexisting PostgreSQL instance at host {host}...")
if skip_confirm is False:
confirmation = input(
f"Mathesar will be installed on DB {db_name} at host {host}."
| {"golden_diff": "diff --git a/install.py b/install.py\n--- a/install.py\n+++ b/install.py\n@@ -39,7 +39,7 @@\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n- print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n+ print(f\"Installing Mathesar DB {db_name} on preexisting PostgreSQL instance at host {host}...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n", "issue": "Set up integration testing environment\n## Problem\r\nWe need to setup an integration test environment for Mathesar, to test all user action scenarios.\r\n\r\n## Proposed solution\r\nBased on the discussions we've had previously (#89), the most suitable choice for us is [Playwright](https://playwright.dev/).\r\n\r\nInteg tests require a complete mathesar setup, including a database. We should be able to test `user action -> api calls -> changes in db`.\r\n\r\n## Additional context\r\n- #89 \n", "before_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nfrom config.settings import DATABASES\nfrom db import install\nimport sys\nimport getopt\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n for database_key in [key for key in DATABASES if key != \"default\"]:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n if DATABASES[database_key][\"HOST\"] == \"mathesar_db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n install.create_mathesar_database(\n DATABASES[database_key][\"NAME\"],\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n DATABASES[\"default\"][\"NAME\"],\n DATABASES[\"default\"][\"PORT\"],\n )\n print(f\"Created DB is {DATABASES['mathesar_tables']['NAME']}\")\n else:\n # if we're installing anywhere else, we require the DB to exist in\n # advance.\n username = DATABASES[database_key][\"USER\"]\n password = DATABASES[database_key][\"PASSWORD\"]\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n \"Confirm? (y/n) > \"\n )\n if (confirmation.lower() in [\"y\", \"yes\"]) or (skip_confirm is True):\n print(\"Installing...\")\n install.install_mathesar_on_preexisting_database(\n username,\n password,\n host,\n db_name,\n port,\n )\n else:\n print(\"Skipping DB with key {database_key}.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}]} | 1,241 | 137 |
gh_patches_debug_29514 | rasdani/github-patches | git_diff | liqd__a4-opin-250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No warning/help text when password entered incorrectly
When I try to log in with the wrong login details, I don't get a message informing me what didn't work, i.e. whether the password or the login was wrong (Firefox 47 on Mac).
</issue>
<code>
[start of euth/user_management/forms.py]
1 from django import forms
2 from django.contrib.auth import authenticate, get_user_model
3 from django.contrib.auth.hashers import make_password
4 from django.core.exceptions import ValidationError
5 from django.utils.translation import ugettext as _
6
7 from .models import Registration, Reset
8
9 User = get_user_model()
10
11
12 class LoginForm(forms.Form):
13 email = forms.EmailField(max_length=255, required=True)
14 password = forms.CharField(widget=forms.PasswordInput, required=True)
15
16 def clean(self):
17 email = self.cleaned_data.get('email')
18 password = self.cleaned_data.get('password')
19 user = authenticate(username=email, password=password)
20 if not user or not user.is_active:
21 raise ValidationError(_('password mismatch'))
22 return self.cleaned_data
23
24 def login(self, request):
25 email = self.cleaned_data.get('email')
26 password = self.cleaned_data.get('password')
27 user = authenticate(username=email, password=password)
28 return user
29
30
31 class RegisterForm(forms.Form):
32 email = forms.EmailField(max_length=255, required=True)
33 username = forms.CharField(max_length=255, required=True)
34 password = forms.CharField(
35 widget=forms.PasswordInput,
36 min_length=8,
37 required=True)
38 password_repeat = forms.CharField(
39 widget=forms.PasswordInput, required=True)
40
41 def clean_password_repeat(self):
42 password1 = self.cleaned_data.get('password')
43 password2 = self.cleaned_data.get('password_repeat')
44 if password1 != password2:
45 raise ValidationError(_('passwords dont match'))
46 return password2
47
48 def clean_username(self):
49 username = self.cleaned_data.get('username')
50 user_exists = User.objects.filter(
51 username=username).first() is not None
52 register_exits = Registration.objects.filter(
53 username=username).first() is not None
54
55 if user_exists or register_exits:
56 raise ValidationError(_('username taken'))
57 return username
58
59 def clean_email(self):
60 email = self.cleaned_data.get('email')
61 user_exists = User.objects.filter(email=email).first()
62 register_exists = Registration.objects.filter(email=email).first()
63 if user_exists or register_exists:
64 raise ValidationError(_('email in use'))
65 return email
66
67 def register(self, request):
68 username = self.cleaned_data.get('username')
69 password = self.cleaned_data.get('password')
70 email = self.cleaned_data.get('email')
71 registration = Registration(username=username,
72 email=email,
73 password=make_password(password))
74 return registration
75
76
77 class ActivateForm(forms.Form):
78 token = forms.UUIDField(widget=forms.HiddenInput(), required=True)
79
80 def clean_token(self):
81 token = self.cleaned_data.get('token')
82 registration = Registration.objects.filter(token=token).first()
83 if not registration:
84 raise ValidationError(_('invalid token'))
85 else:
86 self.cleaned_data['registration'] = registration
87 return token
88
89 def activate(self, request):
90 registration = self.cleaned_data.get('registration')
91 user = User(username=registration.username,
92 email=registration.email,
93 password=registration.password)
94 return user, registration
95
96
97 class RequestResetForm(forms.Form):
98 username_or_email = forms.CharField(max_length=255)
99
100 def clean_username_or_email(self):
101 username_or_email = self.cleaned_data.get('username_or_email')
102 user = (User.objects.filter(username=username_or_email).first() or
103 User.objects.filter(email=username_or_email).first())
104 if not user:
105 raise ValidationError(_('unkown user'))
106 else:
107 self.cleaned_data['user'] = user
108 return username_or_email
109
110 def request_reset(self, request):
111 user = self.cleaned_data.get('user')
112 return Reset(user=user)
113
114
115 class ResetForm(forms.Form):
116 token = forms.UUIDField(widget=forms.HiddenInput(), required=True)
117 password = forms.CharField(
118 widget=forms.PasswordInput,
119 min_length=8,
120 required=True)
121 password_repeat = forms.CharField(
122 widget=forms.PasswordInput,
123 required=True)
124
125 def clean_token(self):
126 token = self.cleaned_data.get('token')
127 reset = Reset.objects.filter(token=token).first()
128 if not reset:
129 ValidationError(_('invalid token'))
130 else:
131 self.cleaned_data['reset'] = reset
132 return token
133
134 def clean_password_repeat(self):
135 password1 = self.cleaned_data.get('password')
136 password2 = self.cleaned_data.get('password_repeat')
137 if password1 != password2:
138 raise ValidationError(_('passwords dont match'))
139 return password2
140
141 def reset_password(self, request):
142 reset = self.cleaned_data.get('reset')
143 password = self.cleaned_data.get('password')
144 user = reset.user
145 user.password = make_password(password)
146 return user, reset
147
[end of euth/user_management/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/user_management/forms.py b/euth/user_management/forms.py
--- a/euth/user_management/forms.py
+++ b/euth/user_management/forms.py
@@ -15,6 +15,12 @@
def clean(self):
email = self.cleaned_data.get('email')
+ if email and not User.objects.filter(email=email):
+ if Registration.objects.filter(email=email):
+ raise ValidationError(_('account not activated'))
+ else:
+ raise ValidationError(_('account doesn\'t exist'))
+
password = self.cleaned_data.get('password')
user = authenticate(username=email, password=password)
if not user or not user.is_active:
@@ -42,7 +48,7 @@
password1 = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password_repeat')
if password1 != password2:
- raise ValidationError(_('passwords dont match'))
+ raise ValidationError(_('passwords don\'t match'))
return password2
def clean_username(self):
@@ -134,8 +140,8 @@
def clean_password_repeat(self):
password1 = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password_repeat')
- if password1 != password2:
- raise ValidationError(_('passwords dont match'))
+ if password1 and password1 != password2:
+ raise ValidationError(_('passwords don\'t match'))
return password2
def reset_password(self, request):
| {"golden_diff": "diff --git a/euth/user_management/forms.py b/euth/user_management/forms.py\n--- a/euth/user_management/forms.py\n+++ b/euth/user_management/forms.py\n@@ -15,6 +15,12 @@\n \n def clean(self):\n email = self.cleaned_data.get('email')\n+ if email and not User.objects.filter(email=email):\n+ if Registration.objects.filter(email=email):\n+ raise ValidationError(_('account not activated'))\n+ else:\n+ raise ValidationError(_('account doesn\\'t exist'))\n+\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n if not user or not user.is_active:\n@@ -42,7 +48,7 @@\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n- raise ValidationError(_('passwords dont match'))\n+ raise ValidationError(_('passwords don\\'t match'))\n return password2\n \n def clean_username(self):\n@@ -134,8 +140,8 @@\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n- if password1 != password2:\n- raise ValidationError(_('passwords dont match'))\n+ if password1 and password1 != password2:\n+ raise ValidationError(_('passwords don\\'t match'))\n return password2\n \n def reset_password(self, request):\n", "issue": "No warning/help text when password entered incorrectly\nWhen I try to log in with the wrong log in details, I don't get a message informing me what didn't work - i.e. wrong password/log in (Firefox, 47 on Mac).\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Registration, Reset\n\nUser = get_user_model()\n\n\nclass LoginForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n password = forms.CharField(widget=forms.PasswordInput, required=True)\n\n def clean(self):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n if not user or not user.is_active:\n raise ValidationError(_('password mismatch'))\n return self.cleaned_data\n\n def login(self, request):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n return user\n\n\nclass RegisterForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n username = forms.CharField(max_length=255, required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput, required=True)\n\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n raise ValidationError(_('passwords dont match'))\n return password2\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n user_exists = User.objects.filter(\n username=username).first() is not None\n register_exits = Registration.objects.filter(\n username=username).first() is not None\n\n if user_exists or register_exits:\n raise ValidationError(_('username taken'))\n return username\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n user_exists = 
User.objects.filter(email=email).first()\n register_exists = Registration.objects.filter(email=email).first()\n if user_exists or register_exists:\n raise ValidationError(_('email in use'))\n return email\n\n def register(self, request):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n email = self.cleaned_data.get('email')\n registration = Registration(username=username,\n email=email,\n password=make_password(password))\n return registration\n\n\nclass ActivateForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n registration = Registration.objects.filter(token=token).first()\n if not registration:\n raise ValidationError(_('invalid token'))\n else:\n self.cleaned_data['registration'] = registration\n return token\n\n def activate(self, request):\n registration = self.cleaned_data.get('registration')\n user = User(username=registration.username,\n email=registration.email,\n password=registration.password)\n return user, registration\n\n\nclass RequestResetForm(forms.Form):\n username_or_email = forms.CharField(max_length=255)\n\n def clean_username_or_email(self):\n username_or_email = self.cleaned_data.get('username_or_email')\n user = (User.objects.filter(username=username_or_email).first() or\n User.objects.filter(email=username_or_email).first())\n if not user:\n raise ValidationError(_('unkown user'))\n else:\n self.cleaned_data['user'] = user\n return username_or_email\n\n def request_reset(self, request):\n user = self.cleaned_data.get('user')\n return Reset(user=user)\n\n\nclass ResetForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput,\n required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n reset = Reset.objects.filter(token=token).first()\n if not reset:\n ValidationError(_('invalid token'))\n else:\n self.cleaned_data['reset'] = reset\n return token\n\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n raise ValidationError(_('passwords dont match'))\n return password2\n\n def reset_password(self, request):\n reset = self.cleaned_data.get('reset')\n password = self.cleaned_data.get('password')\n user = reset.user\n user.password = make_password(password)\n return user, reset\n", "path": "euth/user_management/forms.py"}]} | 1,916 | 318 |
gh_patches_debug_17 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update data on the Ebola map
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.5.13'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.13'
+hdx_version = 'v0.5.14'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.14'\n", "issue": "Update data on the Ebola map\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 578 | 109 |
gh_patches_debug_3433 | rasdani/github-patches | git_diff | facebookresearch__mmf-74 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ERROR: 'BaseTrainer' object has no attribute 'args'
I followed the instructions on the https://learnpythia.readthedocs.io/ to setup. I seem to have everything installed, and I could run the collab notebook locally.
However, when I am trying to run the below command to try out inferences:
python tools/run.py --tasks vqa --datasets textvqa --model lorra --config configs/vqa/textvqa/lorra.yml \
--run_type val --evalai_inference 1 --resume_file data/models/lorra_best.pth
I am getting below error:
}
2019-05-29T20:25:41 INFO: Loading tasks and data
2019-05-29T20:25:44 INFO: CUDA Device 0 is: GeForce GTX 1080 Ti
2019-05-29T20:25:46 INFO: Torch version is: 1.0.0
2019-05-29T20:25:46 ERROR: 'BaseTrainer' object has no attribute 'args'
Traceback (most recent call last):
File "tools/run.py", line 94, in <module>
run()
File "tools/run.py", line 82, in run
trainer.load()
File "/root/pythia/pythia/trainers/base_trainer.py", line 46, in load
self.load_extras()
File "/root/pythia/pythia/trainers/base_trainer.py", line 139, in load_extras
self.checkpoint = Checkpoint(self)
File "/root/pythia/pythia/utils/checkpoint.py", line 28, in __init__
self.ckpt_foldername += foldername_from_config_override(self.trainer.args)
AttributeError: 'BaseTrainer' object has no attribute 'args'
I seem to be doing something wrong, any help would be great. I didn't change any code or config yet.
</issue>
<code>
[start of pythia/utils/build_utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 import torch
3 import warnings
4
5 from pythia.utils.configuration import Configuration
6 from pythia.common.registry import registry
7 from pythia.utils.general import get_optimizer_parameters
8
9
10 def build_trainer(args, *rest, **kwargs):
11 configuration = Configuration(args.config)
12
13 # Update with the config override if passed
14 configuration.override_with_cmd_config(args.config_override)
15
16 # Now, update with opts args that were passed
17 configuration.override_with_cmd_opts(args.opts)
18
19 # Finally, update with args that were specifically passed
20 # as arguments
21 configuration.update_with_args(args)
22 configuration.freeze()
23
24 config = configuration.get_config()
25 registry.register("config", config)
26 registry.register("configuration", configuration)
27
28 trainer_type = config.training_parameters.trainer
29 trainer_cls = registry.get_trainer_class(trainer_type)
30 return trainer_cls(config)
31
32
33 def build_model(config):
34 model_name = config.model
35
36 model_class = registry.get_model_class(model_name)
37
38 if model_class is None:
39 registry.get("writer").write("No model registered for name: %s" % model_name)
40 model = model_class(config)
41
42 if hasattr(model, "build"):
43 model.build()
44 model.init_losses_and_metrics()
45
46 return model
47
48
49 def build_optimizer(model, config):
50 optimizer_config = config.optimizer_attributes
51 if not hasattr(optimizer_config, "type"):
52 raise ValueError(
53 "Optimizer attributes must have a 'type' key "
54 "specifying the type of optimizer. "
55 "(Custom or PyTorch)"
56 )
57 optimizer_type = optimizer_config.type
58
59 if not hasattr(optimizer_config, "params"):
60 warnings.warn(
61 "optimizer attributes has no params defined, defaulting to {}."
62 )
63
64 params = getattr(optimizer_config, "params", {})
65
66 if hasattr(torch.optim, optimizer_type):
67 optimizer_class = getattr(torch.optim, optimizer_type)
68 else:
69 optimizer_class = registry.get_optimizer_class(optimizer_type)
70 if optimizer_class is None:
71 raise ValueError(
72 "No optimizer class of type {} present in "
73 "either torch or registered to registry"
74 )
75
76 parameters = get_optimizer_parameters(model, config)
77 optimizer = optimizer_class(parameters, **params)
78 return optimizer
79
[end of pythia/utils/build_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythia/utils/build_utils.py b/pythia/utils/build_utils.py
--- a/pythia/utils/build_utils.py
+++ b/pythia/utils/build_utils.py
@@ -27,7 +27,12 @@
trainer_type = config.training_parameters.trainer
trainer_cls = registry.get_trainer_class(trainer_type)
- return trainer_cls(config)
+ trainer_obj = trainer_cls(config)
+
+ # Set args as an attribute for future use
+ setattr(trainer_obj, 'args', args)
+
+ return trainer_obj
def build_model(config):
| {"golden_diff": "diff --git a/pythia/utils/build_utils.py b/pythia/utils/build_utils.py\n--- a/pythia/utils/build_utils.py\n+++ b/pythia/utils/build_utils.py\n@@ -27,7 +27,12 @@\n \n trainer_type = config.training_parameters.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n- return trainer_cls(config)\n+ trainer_obj = trainer_cls(config)\n+\n+ # Set args as an attribute for future use\n+ setattr(trainer_obj, 'args', args)\n+\n+ return trainer_obj\n \n \n def build_model(config):\n", "issue": "ERROR: 'BaseTrainer' object has no attribute 'args'\nI followed the instructions on the https://learnpythia.readthedocs.io/ to setup. I seem to have everything installed, and I could run the collab notebook locally. \r\n\r\nHowever, when I am trying to run the below command to try out inferences:\r\n\r\npython tools/run.py --tasks vqa --datasets textvqa --model lorra --config configs/vqa/textvqa/lorra.yml \\\r\n--run_type val --evalai_inference 1 --resume_file data/models/lorra_best.pth\r\n\r\nI am getting below error:\r\n\r\n\r\n}\r\n2019-05-29T20:25:41 INFO: Loading tasks and data\r\n2019-05-29T20:25:44 INFO: CUDA Device 0 is: GeForce GTX 1080 Ti\r\n2019-05-29T20:25:46 INFO: Torch version is: 1.0.0\r\n2019-05-29T20:25:46 ERROR: 'BaseTrainer' object has no attribute 'args'\r\nTraceback (most recent call last):\r\n File \"tools/run.py\", line 94, in <module>\r\n run()\r\n File \"tools/run.py\", line 82, in run\r\n trainer.load()\r\n File \"/root/pythia/pythia/trainers/base_trainer.py\", line 46, in load\r\n self.load_extras()\r\n File \"/root/pythia/pythia/trainers/base_trainer.py\", line 139, in load_extras\r\n self.checkpoint = Checkpoint(self)\r\n File \"/root/pythia/pythia/utils/checkpoint.py\", line 28, in __init__\r\n self.ckpt_foldername += foldername_from_config_override(self.trainer.args)\r\nAttributeError: 'BaseTrainer' object has no attribute 'args'\r\n\r\n\r\nI seem to be doing something wrong, any help would be great. I didn't change any code or config yet.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\nimport torch\nimport warnings\n\nfrom pythia.utils.configuration import Configuration\nfrom pythia.common.registry import registry\nfrom pythia.utils.general import get_optimizer_parameters\n\n\ndef build_trainer(args, *rest, **kwargs):\n configuration = Configuration(args.config)\n\n # Update with the config override if passed\n configuration.override_with_cmd_config(args.config_override)\n\n # Now, update with opts args that were passed\n configuration.override_with_cmd_opts(args.opts)\n\n # Finally, update with args that were specifically passed\n # as arguments\n configuration.update_with_args(args)\n configuration.freeze()\n\n config = configuration.get_config()\n registry.register(\"config\", config)\n registry.register(\"configuration\", configuration)\n\n trainer_type = config.training_parameters.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n return trainer_cls(config)\n\n\ndef build_model(config):\n model_name = config.model\n\n model_class = registry.get_model_class(model_name)\n\n if model_class is None:\n registry.get(\"writer\").write(\"No model registered for name: %s\" % model_name)\n model = model_class(config)\n\n if hasattr(model, \"build\"):\n model.build()\n model.init_losses_and_metrics()\n\n return model\n\n\ndef build_optimizer(model, config):\n optimizer_config = config.optimizer_attributes\n if not hasattr(optimizer_config, \"type\"):\n raise ValueError(\n \"Optimizer attributes must have a 'type' key \"\n \"specifying the type of optimizer. \"\n \"(Custom or PyTorch)\"\n )\n optimizer_type = optimizer_config.type\n\n if not hasattr(optimizer_config, \"params\"):\n warnings.warn(\n \"optimizer attributes has no params defined, defaulting to {}.\"\n )\n\n params = getattr(optimizer_config, \"params\", {})\n\n if hasattr(torch.optim, optimizer_type):\n optimizer_class = getattr(torch.optim, optimizer_type)\n else:\n optimizer_class = registry.get_optimizer_class(optimizer_type)\n if optimizer_class is None:\n raise ValueError(\n \"No optimizer class of type {} present in \"\n \"either torch or registered to registry\"\n )\n\n parameters = get_optimizer_parameters(model, config)\n optimizer = optimizer_class(parameters, **params)\n return optimizer\n", "path": "pythia/utils/build_utils.py"}]} | 1,630 | 132 |
gh_patches_debug_3708 | rasdani/github-patches | git_diff | pyca__cryptography-163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hasher.hexdigest() is documented as returning a str, but returns unicode under python2
It should return a native string under both py2 and py3
</issue>
<code>
[start of cryptography/primitives/hashes.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import abc
17
18 import binascii
19
20 import six
21
22 from cryptography.bindings import _default_api
23
24
25 class BaseHash(six.with_metaclass(abc.ABCMeta)):
26 def __init__(self, data=None, api=None, ctx=None):
27 if api is None:
28 api = _default_api
29 self._api = api
30 self._ctx = self._api.create_hash_context(self) if ctx is None else ctx
31 if data is not None:
32 self.update(data)
33
34 def update(self, data):
35 if isinstance(data, six.text_type):
36 raise TypeError("Unicode-objects must be encoded before hashing")
37 self._api.update_hash_context(self._ctx, data)
38
39 def copy(self):
40 return self.__class__(ctx=self._copy_ctx())
41
42 def digest(self):
43 return self._api.finalize_hash_context(self._copy_ctx(),
44 self.digest_size)
45
46 def hexdigest(self):
47 return binascii.hexlify(self.digest()).decode("ascii")
48
49 def _copy_ctx(self):
50 return self._api.copy_hash_context(self._ctx)
51
52
53 class SHA1(BaseHash):
54 name = "sha1"
55 digest_size = 20
56 block_size = 64
57
58
59 class SHA224(BaseHash):
60 name = "sha224"
61 digest_size = 28
62 block_size = 64
63
64
65 class SHA256(BaseHash):
66 name = "sha256"
67 digest_size = 32
68 block_size = 64
69
70
71 class SHA384(BaseHash):
72 name = "sha384"
73 digest_size = 48
74 block_size = 128
75
76
77 class SHA512(BaseHash):
78 name = "sha512"
79 digest_size = 64
80 block_size = 128
81
82
83 class RIPEMD160(BaseHash):
84 name = "ripemd160"
85 digest_size = 20
86 block_size = 64
87
88
89 class Whirlpool(BaseHash):
90 name = "whirlpool"
91 digest_size = 64
92 block_size = 64
93
94
95 class MD5(BaseHash):
96 name = "md5"
97 digest_size = 16
98 block_size = 64
99
[end of cryptography/primitives/hashes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cryptography/primitives/hashes.py b/cryptography/primitives/hashes.py
--- a/cryptography/primitives/hashes.py
+++ b/cryptography/primitives/hashes.py
@@ -44,7 +44,7 @@
self.digest_size)
def hexdigest(self):
- return binascii.hexlify(self.digest()).decode("ascii")
+ return str(binascii.hexlify(self.digest()).decode("ascii"))
def _copy_ctx(self):
return self._api.copy_hash_context(self._ctx)
| {"golden_diff": "diff --git a/cryptography/primitives/hashes.py b/cryptography/primitives/hashes.py\n--- a/cryptography/primitives/hashes.py\n+++ b/cryptography/primitives/hashes.py\n@@ -44,7 +44,7 @@\n self.digest_size)\n \n def hexdigest(self):\n- return binascii.hexlify(self.digest()).decode(\"ascii\")\n+ return str(binascii.hexlify(self.digest()).decode(\"ascii\"))\n \n def _copy_ctx(self):\n return self._api.copy_hash_context(self._ctx)\n", "issue": "Hasher.hexdigest() is documented as returning a str, but returns unicode under python2\nIt should return a native string under boht py2 and py3\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport binascii\n\nimport six\n\nfrom cryptography.bindings import _default_api\n\n\nclass BaseHash(six.with_metaclass(abc.ABCMeta)):\n def __init__(self, data=None, api=None, ctx=None):\n if api is None:\n api = _default_api\n self._api = api\n self._ctx = self._api.create_hash_context(self) if ctx is None else ctx\n if data is not None:\n self.update(data)\n\n def update(self, data):\n if isinstance(data, six.text_type):\n raise TypeError(\"Unicode-objects must be encoded before hashing\")\n self._api.update_hash_context(self._ctx, data)\n\n def copy(self):\n return self.__class__(ctx=self._copy_ctx())\n\n def digest(self):\n return self._api.finalize_hash_context(self._copy_ctx(),\n self.digest_size)\n\n def hexdigest(self):\n return binascii.hexlify(self.digest()).decode(\"ascii\")\n\n def _copy_ctx(self):\n return self._api.copy_hash_context(self._ctx)\n\n\nclass SHA1(BaseHash):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\nclass SHA224(BaseHash):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\nclass SHA256(BaseHash):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\nclass SHA384(BaseHash):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\nclass SHA512(BaseHash):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\nclass RIPEMD160(BaseHash):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\nclass Whirlpool(BaseHash):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\nclass MD5(BaseHash):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n", "path": "cryptography/primitives/hashes.py"}]} | 1,404 | 114 |
gh_patches_debug_3139 | rasdani/github-patches | git_diff | UTNkar__moore-53 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Confirmation e-mails are not sent
For some reason the confirmation e-mails are no longer being sent.
</issue>
<code>
[start of website/members/signals.py]
1 import datetime
2 from django.conf import settings
3 from django.core.mail import EmailMultiAlternatives
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6 from django.template import loader
7 from django.utils import timezone
8 from simple_email_confirmation import unconfirmed_email_created
9
10 from members.models import Member
11
12
13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
14 def send_confirmation_email(sender, email, user=None, **kwargs):
15 if user is not None:
16 context = {
17 'email': email,
18 'domain': settings.BASE_URL,
19 'site_name': settings.WAGTAIL_SITE_NAME,
20 'token': user.get_confirmation_key(email),
21 }
22
23 subject = loader.render_to_string(
24 'members/email_change_subject.txt', context)
25 # Email subject *must not* contain newlines
26 subject = ''.join(subject.splitlines())
27 body = loader.render_to_string('members/email_change_email.html',
28 context)
29
30 email_message = EmailMultiAlternatives(subject, body, None, [email])
31 email_message.send()
32
33
34 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
35 def check_membership(sender, instance, **kwargs):
36 if timezone.now() - instance.status_changed > datetime.timedelta(1):
37 instance.update_status()
38
[end of website/members/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/signals.py b/website/members/signals.py
--- a/website/members/signals.py
+++ b/website/members/signals.py
@@ -12,6 +12,7 @@
@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
def send_confirmation_email(sender, email, user=None, **kwargs):
+ user = user or sender
if user is not None:
context = {
'email': email,
| {"golden_diff": "diff --git a/website/members/signals.py b/website/members/signals.py\n--- a/website/members/signals.py\n+++ b/website/members/signals.py\n@@ -12,6 +12,7 @@\n \n @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\n def send_confirmation_email(sender, email, user=None, **kwargs):\n+ user = user or sender\n if user is not None:\n context = {\n 'email': email,\n", "issue": "Confirmation e-mails are not sent\nFor some reason the confirmation e-mails are no longer being send.\n", "before_files": [{"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n", "path": "website/members/signals.py"}]} | 895 | 107 |
gh_patches_debug_21111 | rasdani/github-patches | git_diff | gammapy__gammapy-4314 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs
**Gammapy version**
dev
**Bug description**
The latex rendering of equations in the following doc page is broken.
https://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html
I think that it is due to the `\a` that appears multiple times in `\frac`, but I am not really sure as I don't understand HTML.
Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs
**Gammapy version**
dev
**Bug description**
The latex rendering of equations in the following doc page is broken.
https://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html
I think that it is due to the `\a` that appears multiple times in `\frac`, but I am not really sure as I don't understand HTML.
</issue>
<code>
[start of examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py]
1 r"""
2 .. _super-exp-cutoff-powerlaw-4fgl-dr3-spectral-model:
3
4 Super Exponential Cutoff Power Law Model used for 4FGL-DR3
5 ==========================================================
6
7 This model parametrises super exponential cutoff power-law model spectrum used for 4FGL.
8
9 It is defined by the following equation:
10
11 .. math::
12
13
14 \phi(e) =
15 \begin{cases}
16 \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} -\Gamma_1} \cdot \exp \left(
17 \frac{\a}{\Gamma_2^2} \left( 1 - \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} \right)
18 \right)&
19 \\
20 \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{\a}{2} \ln \frac{E}{E_0} - \frac{\a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{\a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}}\\
21 0 & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right|
22 \end{cases}
23
24 See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf
25 """
26
27 # %%
28 # Example plot
29 # ------------
30 # Here is an example plot of the model:
31
32 from astropy import units as u
33 import matplotlib.pyplot as plt
34 from gammapy.modeling.models import (
35 Models,
36 SkyModel,
37 SuperExpCutoffPowerLaw4FGLDR3SpectralModel,
38 )
39
40 energy_range = [0.1, 100] * u.TeV
41 model = SuperExpCutoffPowerLaw4FGLDR3SpectralModel(
42 index_1=1,
43 index_2=2,
44 amplitude="1e-12 TeV-1 cm-2 s-1",
45 reference="1 TeV",
46 expfactor=1e-2,
47 )
48 model.plot(energy_range)
49 plt.grid(which="both")
50 plt.ylim(1e-24, 1e-10)
51
52 # %%
53 # YAML representation
54 # -------------------
55 # Here is an example YAML file using the model:
56
57 model = SkyModel(spectral_model=model, name="super-exp-cutoff-power-law-4fgl-dr3-model")
58 models = Models([model])
59
60 print(models.to_yaml())
61
[end of examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
--- a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
+++ b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
@@ -10,16 +10,7 @@
.. math::
-
- \phi(e) =
- \begin{cases}
- \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} -\Gamma_1} \cdot \exp \left(
- \frac{\a}{\Gamma_2^2} \left( 1 - \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} \right)
- \right)&
- \\
- \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{\a}{2} \ln \frac{E}{E_0} - \frac{\a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{\a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}}\\
- 0 & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right|
- \end{cases}
+\phi(E) = \begin{cases} \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{a}{\Gamma_2} -\Gamma_1} \cdot \exp \left( \frac{a}{\Gamma_2^2}\left( 1 - \left(\frac{E}{E_0}\right)^{\Gamma_2} \right) \right) \\ \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{a}{2} \ln \frac{E}{E_0} - \frac{a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}} & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right| < 10^{-2} \end{cases}
See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf
"""
| {"golden_diff": "diff --git a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n--- a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n+++ b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n@@ -10,16 +10,7 @@\n \n .. math::\n \n-\n- \\phi(e) =\n- \\begin{cases}\n- \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left(\n- \\frac{\\a}{\\Gamma_2^2} \\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} \\right)\n- \\right)&\n- \\\\\n- \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{\\a}{2} \\ln \\frac{E}{E_0} - \\frac{\\a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{\\a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}}\\\\\n- 0 & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right|\n- \\end{cases}\n+\\phi(E) = \\begin{cases} \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left( \\frac{a}{\\Gamma_2^2}\\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\Gamma_2} \\right) \\right) \\\\ \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{a}{2} \\ln \\frac{E}{E_0} - \\frac{a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}} & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right| < 10^{-2} \\end{cases}\n \n See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf\n \"\"\"\n", "issue": "Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\n\r\nThe latex rendering of equations in the following doc page is broken. \r\nhttps://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html\r\n\r\nI think that it is due to the `\\a`that appears multiple times in `\\frac`, but I am not really sure as I don't understand HTML.\r\n\nBad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\n\r\nThe latex rendering of equations in the following doc page is broken. \r\nhttps://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html\r\n\r\nI think that it is due to the `\\a`that appears multiple times in `\\frac`, but I am not really sure as I don't understand HTML.\r\n\n", "before_files": [{"content": "r\"\"\"\n.. _super-exp-cutoff-powerlaw-4fgl-dr3-spectral-model:\n\nSuper Exponential Cutoff Power Law Model used for 4FGL-DR3\n==========================================================\n\nThis model parametrises super exponential cutoff power-law model spectrum used for 4FGL.\n\nIt is defined by the following equation:\n\n.. 
math::\n\n\n \\phi(e) =\n \\begin{cases}\n \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left(\n \\frac{\\a}{\\Gamma_2^2} \\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} \\right)\n \\right)&\n \\\\\n \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{\\a}{2} \\ln \\frac{E}{E_0} - \\frac{\\a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{\\a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}}\\\\\n 0 & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right|\n \\end{cases}\n\nSee Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf\n\"\"\"\n\n# %%\n# Example plot\n# ------------\n# Here is an example plot of the model:\n\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\nfrom gammapy.modeling.models import (\n Models,\n SkyModel,\n SuperExpCutoffPowerLaw4FGLDR3SpectralModel,\n)\n\nenergy_range = [0.1, 100] * u.TeV\nmodel = SuperExpCutoffPowerLaw4FGLDR3SpectralModel(\n index_1=1,\n index_2=2,\n amplitude=\"1e-12 TeV-1 cm-2 s-1\",\n reference=\"1 TeV\",\n expfactor=1e-2,\n)\nmodel.plot(energy_range)\nplt.grid(which=\"both\")\nplt.ylim(1e-24, 1e-10)\n\n# %%\n# YAML representation\n# -------------------\n# Here is an example YAML file using the model:\n\nmodel = SkyModel(spectral_model=model, name=\"super-exp-cutoff-power-law-4fgl-dr3-model\")\nmodels = Models([model])\n\nprint(models.to_yaml())\n", "path": "examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py"}]} | 1,488 | 610 |
gh_patches_debug_15716 | rasdani/github-patches | git_diff | beeware__toga-1198 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WebView in MacOS not accepting keyboard input
**Describe the bug**
I am unable to type text in HTML input fields within a Toga WebView on Mac OS.
**To Reproduce**
Steps to reproduce the behavior:
1. Follow the tutorial at https://docs.beeware.org/en/latest/tutorial/tutorial-1.html until you are able to run `briefcase dev` successfully.
2. Modify the `startup` function so that its body is:
```main_box = toga.Box()
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = toga.WebView(style=Pack(flex=1))
self.main_window.content.url = 'https://www.google.ca/'
self.main_window.show()
```
3. Attempt to search for something in the Google search input.
4. When I try, I am unable to type characters. Nothing happens with my key presses. Note that specified command shortcuts do seem to work.
**Expected behavior**
I expect to be able to enter text into text boxes in a WebView in Toga.
**Environment:**
- Operating System: Mac OS 10.15.6
- Python version: 3.6.5
- Software versions:
- Briefcase: 0.3.0
- Toga: 0.3.0.dev26
**Additional context**
I discovered that if I just comment out the `keyDown_` method in toga_cocoa [here](https://github.com/beeware/toga/blob/master/src/cocoa/toga_cocoa/widgets/webview.py#L23-L27), then I am able to enter text in a WebView.
</issue>
<code>
[start of src/cocoa/toga_cocoa/widgets/webview.py]
1 from asyncio import get_event_loop
2
3 from travertino.size import at_least
4
5 from toga_cocoa.keys import toga_key
6 from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView
7 from rubicon.objc import objc_method, py_from_ns
8 from rubicon.objc.runtime import objc_id
9
10 from .base import Widget
11
12
13 class TogaWebView(WKWebView):
14 @objc_method
15 def webView_didFinish_navigation_(self, sender, wkNavigation) -> None:
16 if self.interface.on_webview_load:
17 self.interface.on_webview_load(self.interface)
18
19 @objc_method
20 def acceptsFirstResponder(self) -> bool:
21 return True
22
23 @objc_method
24 def keyDown_(self, event) -> None:
25 if self.interface.on_key_down:
26 self.interface.on_key_down(self.interface, **toga_key(event))
27
28 @objc_method
29 def touchBar(self):
30 # Disable the touchbar.
31 return None
32
33
34 class WebView(Widget):
35 def create(self):
36 self.native = TogaWebView.alloc().init()
37 self.native.interface = self.interface
38
39 self.native.downloadDelegate = self.native
40 self.native.frameLoadDelegate = self.native
41 self.native.policyDelegate = self.native
42 self.native.resourceLoadDelegate = self.native
43 self.native.uIDelegate = self.native
44
45 # Add the layout constraints
46 self.add_constraints()
47
48 def set_on_key_down(self, handler):
49 pass
50
51 def set_on_webview_load(self, handler):
52 pass
53
54 def get_dom(self):
55 # Utilises Step 2) of:
56 # https://developer.apple.com/library/content/documentation/
57 # Cocoa/Conceptual/DisplayWebContent/Tasks/SaveAndLoad.html
58 html = self.native.mainframe.DOMDocument.documentElement.outerHTML
59 return html
60
61 def set_url(self, value):
62 if value:
63 request = NSURLRequest.requestWithURL(NSURL.URLWithString(self.interface.url))
64 self.native.loadRequest(request)
65
66 def set_content(self, root_url, content):
67 self.native.loadHTMLString(content, baseURL=NSURL.URLWithString(root_url))
68
69 def set_user_agent(self, value):
70 user_agent = value if value else "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8" # NOQA
71 self.native.customUserAgent = user_agent
72
73 async def evaluate_javascript(self, javascript):
74 """
75 Evaluate a JavaScript expression.
76
77 **This method is asynchronous**. It will return when the expression has been
78 evaluated and a result is available.
79
80 :param javascript: The javascript expression to evaluate
81 :type javascript: ``str``
82 """
83
84 loop = get_event_loop()
85 future = loop.create_future()
86
87 def completion_handler(res: objc_id, error: objc_id) -> None:
88
89 if error:
90 error = py_from_ns(error)
91 exc = RuntimeError(str(error))
92 future.set_exception(exc)
93 else:
94 future.set_result(py_from_ns(res))
95
96 self.native.evaluateJavaScript(javascript, completionHandler=completion_handler)
97
98 return await future
99
100 def invoke_javascript(self, javascript):
101 """
102 Invoke a block of javascript.
103
104 :param javascript: The javascript expression to invoke
105 """
106 self.native.evaluateJavaScript(javascript, completionHandler=None)
107
108 def rehint(self):
109 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
110 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
111
[end of src/cocoa/toga_cocoa/widgets/webview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cocoa/toga_cocoa/widgets/webview.py b/src/cocoa/toga_cocoa/widgets/webview.py
--- a/src/cocoa/toga_cocoa/widgets/webview.py
+++ b/src/cocoa/toga_cocoa/widgets/webview.py
@@ -4,7 +4,7 @@
from toga_cocoa.keys import toga_key
from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView
-from rubicon.objc import objc_method, py_from_ns
+from rubicon.objc import objc_method, py_from_ns, send_super
from rubicon.objc.runtime import objc_id
from .base import Widget
@@ -24,6 +24,7 @@
def keyDown_(self, event) -> None:
if self.interface.on_key_down:
self.interface.on_key_down(self.interface, **toga_key(event))
+ send_super(__class__, self, 'keyDown:', event)
@objc_method
def touchBar(self):
| {"golden_diff": "diff --git a/src/cocoa/toga_cocoa/widgets/webview.py b/src/cocoa/toga_cocoa/widgets/webview.py\n--- a/src/cocoa/toga_cocoa/widgets/webview.py\n+++ b/src/cocoa/toga_cocoa/widgets/webview.py\n@@ -4,7 +4,7 @@\n \n from toga_cocoa.keys import toga_key\n from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView\n-from rubicon.objc import objc_method, py_from_ns\n+from rubicon.objc import objc_method, py_from_ns, send_super\n from rubicon.objc.runtime import objc_id\n \n from .base import Widget\n@@ -24,6 +24,7 @@\n def keyDown_(self, event) -> None:\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(event))\n+ send_super(__class__, self, 'keyDown:', event)\n \n @objc_method\n def touchBar(self):\n", "issue": "WebView in MacOS not accepting keyboard input\n**Describe the bug**\r\n\r\nI am unable to type text in HTML input fields within a Toga WebView on Mac OS.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Follow the tutorial at https://docs.beeware.org/en/latest/tutorial/tutorial-1.html until you are able to run `briefcase dev` successfully.\r\n2. Modify the `startup` function so that its body is:\r\n ```main_box = toga.Box()\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = toga.WebView(style=Pack(flex=1))\r\n self.main_window.content.url = 'https://www.google.ca/'\r\n self.main_window.show()\r\n ```\r\n3. Attempt to search for something in the Google search input.\r\n4. When I try, I am unable to type characters. Nothing happens with my key presses. Note that specified command shortcuts do seem to work.\r\n\r\n**Expected behavior**\r\nI expect to be able to enter text into text boxes in a WebView in Toga.\r\n\r\n**Environment:**\r\n - Operating System: Mac OS 10.15.6\r\n - Python version: 3.6.5\r\n - Software versions:\r\n - Briefcase: 0.3.0\r\n - Toga: 0.3.0.dev26\r\n\r\n**Additional context**\r\nI discovered that If I just comment out the `keyDown_` method in toga_cocoa [here](https://github.com/beeware/toga/blob/master/src/cocoa/toga_cocoa/widgets/webview.py#L23-L27) then I am able to enter text in a WebView.\n", "before_files": [{"content": "from asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nfrom toga_cocoa.keys import toga_key\nfrom toga_cocoa.libs import NSURL, NSURLRequest, WKWebView\nfrom rubicon.objc import objc_method, py_from_ns\nfrom rubicon.objc.runtime import objc_id\n\nfrom .base import Widget\n\n\nclass TogaWebView(WKWebView):\n @objc_method\n def webView_didFinish_navigation_(self, sender, wkNavigation) -> None:\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n @objc_method\n def acceptsFirstResponder(self) -> bool:\n return True\n\n @objc_method\n def keyDown_(self, event) -> None:\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(event))\n\n @objc_method\n def touchBar(self):\n # Disable the touchbar.\n return None\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebView.alloc().init()\n self.native.interface = self.interface\n\n self.native.downloadDelegate = self.native\n self.native.frameLoadDelegate = self.native\n self.native.policyDelegate = self.native\n self.native.resourceLoadDelegate = self.native\n self.native.uIDelegate = self.native\n\n # Add the layout constraints\n self.add_constraints()\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_dom(self):\n # 
Utilises Step 2) of:\n # https://developer.apple.com/library/content/documentation/\n # Cocoa/Conceptual/DisplayWebContent/Tasks/SaveAndLoad.html\n html = self.native.mainframe.DOMDocument.documentElement.outerHTML\n return html\n\n def set_url(self, value):\n if value:\n request = NSURLRequest.requestWithURL(NSURL.URLWithString(self.interface.url))\n self.native.loadRequest(request)\n\n def set_content(self, root_url, content):\n self.native.loadHTMLString(content, baseURL=NSURL.URLWithString(root_url))\n\n def set_user_agent(self, value):\n user_agent = value if value else \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8\" # NOQA\n self.native.customUserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n \"\"\"\n Evaluate a JavaScript expression.\n\n **This method is asynchronous**. It will return when the expression has been\n evaluated and a result is available.\n\n :param javascript: The javascript expression to evaluate\n :type javascript: ``str``\n \"\"\"\n\n loop = get_event_loop()\n future = loop.create_future()\n\n def completion_handler(res: objc_id, error: objc_id) -> None:\n\n if error:\n error = py_from_ns(error)\n exc = RuntimeError(str(error))\n future.set_exception(exc)\n else:\n future.set_result(py_from_ns(res))\n\n self.native.evaluateJavaScript(javascript, completionHandler=completion_handler)\n\n return await future\n\n def invoke_javascript(self, javascript):\n \"\"\"\n Invoke a block of javascript.\n\n :param javascript: The javascript expression to invoke\n \"\"\"\n self.native.evaluateJavaScript(javascript, completionHandler=None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/cocoa/toga_cocoa/widgets/webview.py"}]} | 1,907 | 214 |
gh_patches_debug_15799 | rasdani/github-patches | git_diff | pytorch__vision-3453 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What is expected Kinetics400 dataset directory structure?
Given that the dataset does not come with official downloader scripts and that most roll their own or hack some third-party scripts, it would be much clearer if https://pytorch.org/docs/stable/torchvision/datasets.html#kinetics-400 explained what directory structure is expected by `torchvision.datasets.Kinetics400`
What is the expected dataset size? and the video file extensions?
Thanks!
cc @pmeier
</issue>
<code>
[start of torchvision/datasets/kinetics.py]
1 from .utils import list_dir
2 from .folder import make_dataset
3 from .video_utils import VideoClips
4 from .vision import VisionDataset
5
6
7 class Kinetics400(VisionDataset):
8 """
9 `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
10 dataset.
11
12 Kinetics-400 is an action recognition video dataset.
13 This dataset consider every video as a collection of video clips of fixed size, specified
14 by ``frames_per_clip``, where the step in frames between each clip is given by
15 ``step_between_clips``.
16
17 To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
18 and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
19 elements will come from video 1, and the next three elements from video 2.
20 Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
21 frames in a video might be present.
22
23 Internally, it uses a VideoClips object to handle clip creation.
24
25 Args:
26 root (string): Root directory of the Kinetics-400 Dataset.
27 frames_per_clip (int): number of frames in a clip
28 step_between_clips (int): number of frames between each clip
29 transform (callable, optional): A function/transform that takes in a TxHxWxC video
30 and returns a transformed version.
31
32 Returns:
33 tuple: A 3-tuple with the following entries:
34
35 - video (Tensor[T, H, W, C]): the `T` video frames
36 - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
37 and `L` is the number of points
38 - label (int): class of the video clip
39 """
40
41 def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,
42 extensions=('avi',), transform=None, _precomputed_metadata=None,
43 num_workers=1, _video_width=0, _video_height=0,
44 _video_min_dimension=0, _audio_samples=0, _audio_channels=0):
45 super(Kinetics400, self).__init__(root)
46
47 classes = list(sorted(list_dir(root)))
48 class_to_idx = {classes[i]: i for i in range(len(classes))}
49 self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
50 self.classes = classes
51 video_list = [x[0] for x in self.samples]
52 self.video_clips = VideoClips(
53 video_list,
54 frames_per_clip,
55 step_between_clips,
56 frame_rate,
57 _precomputed_metadata,
58 num_workers=num_workers,
59 _video_width=_video_width,
60 _video_height=_video_height,
61 _video_min_dimension=_video_min_dimension,
62 _audio_samples=_audio_samples,
63 _audio_channels=_audio_channels,
64 )
65 self.transform = transform
66
67 @property
68 def metadata(self):
69 return self.video_clips.metadata
70
71 def __len__(self):
72 return self.video_clips.num_clips()
73
74 def __getitem__(self, idx):
75 video, audio, info, video_idx = self.video_clips.get_clip(idx)
76 label = self.samples[video_idx][1]
77
78 if self.transform is not None:
79 video = self.transform(video)
80
81 return video, audio, label
82
[end of torchvision/datasets/kinetics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py
--- a/torchvision/datasets/kinetics.py
+++ b/torchvision/datasets/kinetics.py
@@ -23,7 +23,18 @@
Internally, it uses a VideoClips object to handle clip creation.
Args:
- root (string): Root directory of the Kinetics-400 Dataset.
+ root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows:
+ .. code::
+
+ root/
+ ├── class1
+ │ ├── clip1.avi
+ │ ├── clip2.avi
+ │ └── ...
+ └── class2
+ ├── clipx.avi
+ └── ...
+
frames_per_clip (int): number of frames in a clip
step_between_clips (int): number of frames between each clip
transform (callable, optional): A function/transform that takes in a TxHxWxC video
| {"golden_diff": "diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py\n--- a/torchvision/datasets/kinetics.py\n+++ b/torchvision/datasets/kinetics.py\n@@ -23,7 +23,18 @@\n Internally, it uses a VideoClips object to handle clip creation.\n \n Args:\n- root (string): Root directory of the Kinetics-400 Dataset.\n+ root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows:\n+ .. code::\n+\n+ root/\n+ \u251c\u2500\u2500 class1\n+ \u2502 \u251c\u2500\u2500 clip1.avi\n+ \u2502 \u251c\u2500\u2500 clip2.avi\n+ \u2502 \u2514\u2500\u2500 ...\n+ \u2514\u2500\u2500 class2\n+ \u251c\u2500\u2500 clipx.avi\n+ \u2514\u2500\u2500 ...\n+\n frames_per_clip (int): number of frames in a clip\n step_between_clips (int): number of frames between each clip\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n", "issue": "What is expected Kinetics400 dataset directory structure?\nGiven that the dataset does not come with official downloader scripts and that most roll their own or hack some third-party scripts, it would be much clearer if https://pytorch.org/docs/stable/torchvision/datasets.html#kinetics-400 explained what directory structure is expected by `torchvision.datasets.Kinetics400`\r\n\r\nWhat is the expected dataset size? and the video file extensions?\r\n\r\nThanks!\n\ncc @pmeier\n", "before_files": [{"content": "from .utils import list_dir\nfrom .folder import make_dataset\nfrom .video_utils import VideoClips\nfrom .vision import VisionDataset\n\n\nclass Kinetics400(VisionDataset):\n \"\"\"\n `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_\n dataset.\n\n Kinetics-400 is an action recognition video dataset.\n This dataset consider every video as a collection of video clips of fixed size, specified\n by ``frames_per_clip``, where the step in frames between each clip is given by\n ``step_between_clips``.\n\n To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``\n and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two\n elements will come from video 1, and the next three elements from video 2.\n Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all\n frames in a video might be present.\n\n Internally, it uses a VideoClips object to handle clip creation.\n\n Args:\n root (string): Root directory of the Kinetics-400 Dataset.\n frames_per_clip (int): number of frames in a clip\n step_between_clips (int): number of frames between each clip\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n and returns a transformed version.\n\n Returns:\n tuple: A 3-tuple with the following entries:\n\n - video (Tensor[T, H, W, C]): the `T` video frames\n - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels\n and `L` is the number of points\n - label (int): class of the video clip\n \"\"\"\n\n def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,\n extensions=('avi',), transform=None, _precomputed_metadata=None,\n num_workers=1, _video_width=0, _video_height=0,\n _video_min_dimension=0, _audio_samples=0, _audio_channels=0):\n super(Kinetics400, self).__init__(root)\n\n classes = list(sorted(list_dir(root)))\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)\n self.classes = classes\n video_list = [x[0] 
for x in self.samples]\n self.video_clips = VideoClips(\n video_list,\n frames_per_clip,\n step_between_clips,\n frame_rate,\n _precomputed_metadata,\n num_workers=num_workers,\n _video_width=_video_width,\n _video_height=_video_height,\n _video_min_dimension=_video_min_dimension,\n _audio_samples=_audio_samples,\n _audio_channels=_audio_channels,\n )\n self.transform = transform\n\n @property\n def metadata(self):\n return self.video_clips.metadata\n\n def __len__(self):\n return self.video_clips.num_clips()\n\n def __getitem__(self, idx):\n video, audio, info, video_idx = self.video_clips.get_clip(idx)\n label = self.samples[video_idx][1]\n\n if self.transform is not None:\n video = self.transform(video)\n\n return video, audio, label\n", "path": "torchvision/datasets/kinetics.py"}]} | 1,579 | 240 |
gh_patches_debug_2451 | rasdani/github-patches | git_diff | ivy-llc__ivy-17429 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
empty_like
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/creation.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.4.2 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.4.2 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.4.2 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.4.2 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
[end of ivy/functional/frontends/paddle/tensor/creation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -71,3 +71,8 @@
@to_ivy_arrays_and_back
def eye(num_rows, num_columns=None, dtype=None, name=None):
return ivy.eye(num_rows, num_columns, dtype=dtype)
+
+
+@to_ivy_arrays_and_back
+def empty_like(x, dtype=None, name=None):
+ return ivy.empty_like(x, dtype=dtype)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -71,3 +71,8 @@\n @to_ivy_arrays_and_back\r\n def eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n+\r\n+\r\n+@to_ivy_arrays_and_back\r\n+def empty_like(x, dtype=None, name=None):\r\n+ return ivy.empty_like(x, dtype=dtype)\n", "issue": "empty_like\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]} | 1,376 | 153 |
gh_patches_debug_561 | rasdani/github-patches | git_diff | pex-tool__pex-822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.0.3
On the docket:
+ [x] Pex should trust any host passed via `--index` or `--find-links`. #812
+ [x] A cache should always be used by `pex.resolver.resolve`. #809
+ [x] Use the resolve cache to skip installs. #815
+ [x] Parallelize resolve. #818
+ [x] Cache sdist & local project builds #817
+ [x] Unify resolve and runtime wheel caches. #820
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.2'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.2'
+__version__ = '2.0.3'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.2'\n+__version__ = '2.0.3'\n", "issue": "Release 2.0.3\nOn the docket:\r\n\r\n+ [x] Pex should trust any host passed via `--index` or `--find-links`. #812\r\n+ [x] A cache should always be used by `pex.resolver.resolve`. #809\r\n+ [x] Use the resolve cache to skip installs. #815\r\n+ [x] Parallelize resolve. #818\r\n+ [x] Cache sdist & local project builds #817\r\n+ [x] Unify resolve and runtime wheel caches. #820\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.2'\n", "path": "pex/version.py"}]} | 708 | 95 |
gh_patches_debug_1738 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-94 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: url on Storage Exception when key not found
When attempting to get a key that does not exist, the `NotFoundError` exception tries to reference `response.url`, which does not exist on httplib2's response object.
``` py
Traceback (most recent call last):
[...]
file_key = self.bucket.get_key(path)
File "gcloud/storage/bucket.py", line 83, in get_key
response = self.connection.api_request(method='GET', path=key.path)
File "gcloud/storage/connection.py", line 212, in api_request
raise exceptions.NotFoundError(response, content)
File "gcloud/storage/exceptions.py", line 17, in __init__
self.message = 'GET %s returned a 404.' % (response.url)
File "httplib2/__init__.py", line 1680, in __getattr__
raise AttributeError, name
AttributeError: url
```
</issue>
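For context on the traceback above: httplib2's `Response` object routes unknown attribute lookups through `__getattr__`, which is why `response.url` raises `AttributeError`. A minimal sketch of the kind of fix this implies, building the message only from objects the handler actually receives, is below; the exact message wording is illustrative, and the accepted patch shown further down in this record takes the same approach.

```python
# Sketch of gcloud/storage/exceptions.py with the 404 message built from the
# response object itself; httplib2's Response carries headers and a status
# code, but no `url` attribute.
class StorageError(Exception):
    pass


class ConnectionError(StorageError):

    def __init__(self, response, content):
        super(ConnectionError, self).__init__(str(response) + content)


class NotFoundError(ConnectionError):

    def __init__(self, response, content):
        # The dict-like response still carries the status and headers, which is
        # enough context for a useful error message.
        self.message = 'Request returned a 404. Headers: %s' % (response,)
```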
<code>
[start of gcloud/storage/exceptions.py]
1 # TODO: Make these super useful.
2
3 class StorageError(Exception):
4 pass
5
6
7 class ConnectionError(StorageError):
8
9 def __init__(self, response, content):
10 message = str(response) + content
11 super(ConnectionError, self).__init__(message)
12
13
14 class NotFoundError(ConnectionError):
15
16 def __init__(self, response, content):
17 self.message = 'GET %s returned a 404.' % (response.url)
18
19
20 class StorageDataError(StorageError):
21 pass
22
[end of gcloud/storage/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/storage/exceptions.py b/gcloud/storage/exceptions.py
--- a/gcloud/storage/exceptions.py
+++ b/gcloud/storage/exceptions.py
@@ -14,7 +14,7 @@
class NotFoundError(ConnectionError):
def __init__(self, response, content):
- self.message = 'GET %s returned a 404.' % (response.url)
+ self.message = 'Request returned a 404. Headers: %s' % (response)
class StorageDataError(StorageError):
| {"golden_diff": "diff --git a/gcloud/storage/exceptions.py b/gcloud/storage/exceptions.py\n--- a/gcloud/storage/exceptions.py\n+++ b/gcloud/storage/exceptions.py\n@@ -14,7 +14,7 @@\n class NotFoundError(ConnectionError):\n \n def __init__(self, response, content):\n- self.message = 'GET %s returned a 404.' % (response.url)\n+ self.message = 'Request returned a 404. Headers: %s' % (response)\n \n \n class StorageDataError(StorageError):\n", "issue": "AttributeError: url on Storage Exception when key not found\nWhen attempting to get a key that does not exist the exception for the `NotFoundError` is trying to reference `request.url` which does not exist.\n\n``` py\nTraceback (most recent call last):\n [...]\n file_key = self.bucket.get_key(path)\n File \"gcloud/storage/bucket.py\", line 83, in get_key\n response = self.connection.api_request(method='GET', path=key.path)\n File \"gcloud/storage/connection.py\", line 212, in api_request\n raise exceptions.NotFoundError(response, content)\n File \"gcloud/storage/exceptions.py\", line 17, in __init__\n self.message = 'GET %s returned a 404.' % (response.url)\n File \"httplib2/__init__.py\", line 1680, in __getattr__\n raise AttributeError, name\nAttributeError: url\n```\n\n", "before_files": [{"content": "# TODO: Make these super useful.\n\nclass StorageError(Exception):\n pass\n\n\nclass ConnectionError(StorageError):\n\n def __init__(self, response, content):\n message = str(response) + content\n super(ConnectionError, self).__init__(message)\n\n\nclass NotFoundError(ConnectionError):\n\n def __init__(self, response, content):\n self.message = 'GET %s returned a 404.' % (response.url)\n\n\nclass StorageDataError(StorageError):\n pass\n", "path": "gcloud/storage/exceptions.py"}]} | 886 | 120 |
gh_patches_debug_15959 | rasdani/github-patches | git_diff | zulip__zulip-21059 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Race conditions in muting topics and users
Our pattern in [muting topics](https://github.com/zulip/zulip/blob/b4075b78eb6e128bce7ef3d36b86d176ef2ecfa5/zerver/views/muting.py#L39-L42) is to check if the topic is muted, and if not then to add a row:
```py3
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
do_mute_topic(user_profile, stream, topic_name, date_muted)
return json_success()
```
This pattern is inherently prone to race conditions. Luckily, we catch those due to database constraints, in the form of `UserTopic.objects.create` raising an IntegrityError, but those bubble up as 500's, not 400's.
We should catch those IntegrityErrors and re-raise them as `JsonableError(_("Topic already muted"))`. That applies to the mute-topic codepath as well as the mute-user codepath.
Though it doesn't affect correctness in this case, since the duplicate row is the first database change operation, these actions should be done inside of transactions.
Un-muting is technically also subject to this race, though it doesn't matter -- both processes calling `.delete()` on the same object is a mostly-silent no-op for the second process.
</issue>
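A minimal sketch of the pattern the issue asks for, for illustration only (the wrapper name is hypothetical; `topic_is_muted`, `do_mute_topic`, `JsonableError` and `_` come from the view module listed below, while `IntegrityError` and `transaction.atomic` are standard Django):

```python
from django.db import IntegrityError, transaction
from django.utils.translation import gettext as _

from zerver.lib.actions import do_mute_topic
from zerver.lib.exceptions import JsonableError
from zerver.lib.topic_mutes import topic_is_muted


def mute_topic_without_races(user_profile, stream, topic_name, date_muted) -> None:
    # Keep the cheap pre-check for the common case and its friendly error...
    if topic_is_muted(user_profile, stream.id, topic_name):
        raise JsonableError(_("Topic already muted"))
    try:
        # ...but rely on the database constraint for correctness: a concurrent
        # request can still insert the row between the check and the create.
        with transaction.atomic():
            do_mute_topic(user_profile, stream, topic_name, date_muted)
    except IntegrityError:
        # Surface the duplicate as the same 400-level error instead of a 500.
        raise JsonableError(_("Topic already muted"))
```

As the issue notes, the transaction does not change correctness while the insert is the first write, but it keeps the behaviour well-defined if the action later grows additional database operations.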
<code>
[start of zerver/views/muting.py]
1 import datetime
2 from typing import Optional
3
4 from django.http import HttpRequest, HttpResponse
5 from django.utils.timezone import now as timezone_now
6 from django.utils.translation import gettext as _
7
8 from zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user
9 from zerver.lib.exceptions import JsonableError
10 from zerver.lib.request import REQ, has_request_variables
11 from zerver.lib.response import json_success
12 from zerver.lib.streams import (
13 access_stream_by_id,
14 access_stream_by_name,
15 access_stream_for_unmute_topic_by_id,
16 access_stream_for_unmute_topic_by_name,
17 check_for_exactly_one_stream_arg,
18 )
19 from zerver.lib.topic_mutes import topic_is_muted
20 from zerver.lib.user_mutes import get_mute_object
21 from zerver.lib.users import access_user_by_id
22 from zerver.lib.validator import check_int
23 from zerver.models import UserProfile
24
25
26 def mute_topic(
27 user_profile: UserProfile,
28 stream_id: Optional[int],
29 stream_name: Optional[str],
30 topic_name: str,
31 date_muted: datetime.datetime,
32 ) -> None:
33 if stream_name is not None:
34 (stream, sub) = access_stream_by_name(user_profile, stream_name)
35 else:
36 assert stream_id is not None
37 (stream, sub) = access_stream_by_id(user_profile, stream_id)
38
39 if topic_is_muted(user_profile, stream.id, topic_name):
40 raise JsonableError(_("Topic already muted"))
41
42 do_mute_topic(user_profile, stream, topic_name, date_muted)
43
44
45 def unmute_topic(
46 user_profile: UserProfile,
47 stream_id: Optional[int],
48 stream_name: Optional[str],
49 topic_name: str,
50 ) -> None:
51 error = _("Topic is not muted")
52
53 if stream_name is not None:
54 stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
55 else:
56 assert stream_id is not None
57 stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
58
59 do_unmute_topic(user_profile, stream, topic_name)
60
61
62 @has_request_variables
63 def update_muted_topic(
64 request: HttpRequest,
65 user_profile: UserProfile,
66 stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
67 stream: Optional[str] = REQ(default=None),
68 topic: str = REQ(),
69 op: str = REQ(),
70 ) -> HttpResponse:
71
72 check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
73
74 if op == "add":
75 mute_topic(
76 user_profile=user_profile,
77 stream_id=stream_id,
78 stream_name=stream,
79 topic_name=topic,
80 date_muted=timezone_now(),
81 )
82 return json_success(request)
83 elif op == "remove":
84 unmute_topic(
85 user_profile=user_profile,
86 stream_id=stream_id,
87 stream_name=stream,
88 topic_name=topic,
89 )
90 return json_success(request)
91
92
93 def mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:
94 if user_profile.id == muted_user_id:
95 raise JsonableError(_("Cannot mute self"))
96
97 muted_user = access_user_by_id(
98 user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
99 )
100 date_muted = timezone_now()
101
102 if get_mute_object(user_profile, muted_user) is not None:
103 raise JsonableError(_("User already muted"))
104
105 do_mute_user(user_profile, muted_user, date_muted)
106 return json_success(request)
107
108
109 def unmute_user(
110 request: HttpRequest, user_profile: UserProfile, muted_user_id: int
111 ) -> HttpResponse:
112 muted_user = access_user_by_id(
113 user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
114 )
115 mute_object = get_mute_object(user_profile, muted_user)
116
117 if mute_object is None:
118 raise JsonableError(_("User is not muted"))
119
120 do_unmute_user(mute_object)
121 return json_success(request)
122
[end of zerver/views/muting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/views/muting.py b/zerver/views/muting.py
--- a/zerver/views/muting.py
+++ b/zerver/views/muting.py
@@ -1,6 +1,7 @@
import datetime
from typing import Optional
+from django.db import IntegrityError
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
@@ -39,7 +40,10 @@
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
- do_mute_topic(user_profile, stream, topic_name, date_muted)
+ try:
+ do_mute_topic(user_profile, stream, topic_name, date_muted)
+ except IntegrityError:
+ raise JsonableError(_("Topic already muted"))
def unmute_topic(
| {"golden_diff": "diff --git a/zerver/views/muting.py b/zerver/views/muting.py\n--- a/zerver/views/muting.py\n+++ b/zerver/views/muting.py\n@@ -1,6 +1,7 @@\n import datetime\n from typing import Optional\n \n+from django.db import IntegrityError\n from django.http import HttpRequest, HttpResponse\n from django.utils.timezone import now as timezone_now\n from django.utils.translation import gettext as _\n@@ -39,7 +40,10 @@\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n \n- do_mute_topic(user_profile, stream, topic_name, date_muted)\n+ try:\n+ do_mute_topic(user_profile, stream, topic_name, date_muted)\n+ except IntegrityError:\n+ raise JsonableError(_(\"Topic already muted\"))\n \n \n def unmute_topic(\n", "issue": "Race conditions in muting topics and users\nOur pattern in [muting topics](https://github.com/zulip/zulip/blob/b4075b78eb6e128bce7ef3d36b86d176ef2ecfa5/zerver/views/muting.py#L39-L42) is to check if the topic is muted, and if not then to add a row:\r\n```py3\r\n if topic_is_muted(user_profile, stream.id, topic_name):\r\n raise JsonableError(_(\"Topic already muted\"))\r\n\r\n do_mute_topic(user_profile, stream, topic_name, date_muted)\r\n return json_success()\r\n```\r\n\r\nThis pattern is inherently prone to race conditions. Luckily, we catch those due to database constraints, in the form of `UserTopic.objects.create` raising an IntegrityError, but those bubble up as 500's, not 400's.\r\n\r\nWe should catch those IntegrityError's and re-raise them as `JsonableError(_(\"Topic already muted\"))`. That applies to the mute-topic codepath, as well as the mute-user codepath.\r\n\r\nThough it doesn't affect correctness in this case, since the duplicate row is the first database change operation, these actions should be done inside of transactions.\r\n\r\nUn-muting is technically also subject to this race, though it doesn't matter -- both processes calling `.delete()` on the same object is a mostly-silent no-op for the second process.\n", "before_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> None:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n\n\ndef unmute_topic(\n user_profile: 
UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n) -> None:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n return json_success(request)\n elif op == \"remove\":\n unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n return json_success(request)\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success(request)\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success(request)\n", "path": "zerver/views/muting.py"}]} | 1,994 | 191 |
gh_patches_debug_1074 | rasdani/github-patches | git_diff | huggingface__diffusers-1052 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the precision of our integration tests
We currently have a rather low precision when testing our pipeline, for two reasons.
1. - Our reference is an image and not a numpy array. This means that when we created our reference image we lost float precision, which is unnecessary.
2. - We only test for `.max() < 1e-2`. IMO we should test for `.max() < 1e-4` with the numpy arrays. In my experiments across multiple devices I have **not** seen differences bigger than `.max() < 1e-4` when using full precision.
IMO this could have also prevented: https://github.com/huggingface/diffusers/issues/902
</issue>
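The change the issue asks for can be sketched as follows; this is illustrative only. It assumes the reference is stored as a full-precision `.npy` file and that the `load_numpy` helper re-exported by `diffusers.utils` (see the listing below) accepts a path or URL to it; the function name and default tolerance are placeholders.

```python
import numpy as np

from diffusers.utils import load_numpy  # available when torch is installed


def assert_images_close(image: np.ndarray, expected_npy: str, tol: float = 1e-4) -> None:
    # Compare against a float reference array instead of a PNG so no precision is
    # lost when the reference is created, and tighten the tolerance to 1e-4.
    expected_image = load_numpy(expected_npy)
    assert np.abs(image - expected_image).max() < tol
```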
<code>
[start of src/diffusers/utils/__init__.py]
1 # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import os
17
18 from .deprecation_utils import deprecate
19 from .import_utils import (
20 ENV_VARS_TRUE_AND_AUTO_VALUES,
21 ENV_VARS_TRUE_VALUES,
22 USE_JAX,
23 USE_TF,
24 USE_TORCH,
25 DummyObject,
26 is_accelerate_available,
27 is_flax_available,
28 is_inflect_available,
29 is_modelcards_available,
30 is_onnx_available,
31 is_scipy_available,
32 is_tf_available,
33 is_torch_available,
34 is_transformers_available,
35 is_unidecode_available,
36 requires_backends,
37 )
38 from .logging import get_logger
39 from .outputs import BaseOutput
40
41
42 if is_torch_available():
43 from .testing_utils import (
44 floats_tensor,
45 load_image,
46 load_numpy,
47 parse_flag_from_env,
48 require_torch_gpu,
49 slow,
50 torch_all_close,
51 torch_device,
52 )
53
54
55 logger = get_logger(__name__)
56
57
58 hf_cache_home = os.path.expanduser(
59 os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
60 )
61 default_cache_path = os.path.join(hf_cache_home, "diffusers")
62
63
64 CONFIG_NAME = "config.json"
65 WEIGHTS_NAME = "diffusion_pytorch_model.bin"
66 FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
67 ONNX_WEIGHTS_NAME = "model.onnx"
68 HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
69 DIFFUSERS_CACHE = default_cache_path
70 DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
71 HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
72
[end of src/diffusers/utils/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py
--- a/src/diffusers/utils/__init__.py
+++ b/src/diffusers/utils/__init__.py
@@ -42,6 +42,7 @@
if is_torch_available():
from .testing_utils import (
floats_tensor,
+ load_hf_numpy,
load_image,
load_numpy,
parse_flag_from_env,
| {"golden_diff": "diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py\n--- a/src/diffusers/utils/__init__.py\n+++ b/src/diffusers/utils/__init__.py\n@@ -42,6 +42,7 @@\n if is_torch_available():\n from .testing_utils import (\n floats_tensor,\n+ load_hf_numpy,\n load_image,\n load_numpy,\n parse_flag_from_env,\n", "issue": "Improve the precision of our integration tests\nWe currently have a rather low precision when testing our pipeline due to due reasons. \r\n1. - Our reference is an image and not a numpy array. This means that when we created our reference image we lost float precision which is unnecessary\r\n2. - We only test for `.max() < 1e-2` . IMO we should test for `.max() < 1e-4` with the numpy arrays. In my experiements across multiple devices I have **not** seen differences bigger than `.max() < 1e-4` when using full precision.\r\n\r\nIMO this could have also prevented: https://github.com/huggingface/diffusers/issues/902\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\n\nfrom .deprecation_utils import deprecate\nfrom .import_utils import (\n ENV_VARS_TRUE_AND_AUTO_VALUES,\n ENV_VARS_TRUE_VALUES,\n USE_JAX,\n USE_TF,\n USE_TORCH,\n DummyObject,\n is_accelerate_available,\n is_flax_available,\n is_inflect_available,\n is_modelcards_available,\n is_onnx_available,\n is_scipy_available,\n is_tf_available,\n is_torch_available,\n is_transformers_available,\n is_unidecode_available,\n requires_backends,\n)\nfrom .logging import get_logger\nfrom .outputs import BaseOutput\n\n\nif is_torch_available():\n from .testing_utils import (\n floats_tensor,\n load_image,\n load_numpy,\n parse_flag_from_env,\n require_torch_gpu,\n slow,\n torch_all_close,\n torch_device,\n )\n\n\nlogger = get_logger(__name__)\n\n\nhf_cache_home = os.path.expanduser(\n os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n)\ndefault_cache_path = os.path.join(hf_cache_home, \"diffusers\")\n\n\nCONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"diffusion_pytorch_model.bin\"\nFLAX_WEIGHTS_NAME = \"diffusion_flax_model.msgpack\"\nONNX_WEIGHTS_NAME = \"model.onnx\"\nHUGGINGFACE_CO_RESOLVE_ENDPOINT = \"https://huggingface.co\"\nDIFFUSERS_CACHE = default_cache_path\nDIFFUSERS_DYNAMIC_MODULE_NAME = \"diffusers_modules\"\nHF_MODULES_CACHE = os.getenv(\"HF_MODULES_CACHE\", os.path.join(hf_cache_home, \"modules\"))\n", "path": "src/diffusers/utils/__init__.py"}]} | 1,327 | 99 |
gh_patches_debug_24220 | rasdani/github-patches | git_diff | ietf-tools__datatracker-4407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Schedule editor icons need to be more distinct
From @flynnliz
The various “person” icons are confusing. It’s hard to know at a glance in the grid which conflicts are “person who must be present” and which are “chair conflict,” and it’s even more confusing that in the session request data box on the bottom right, the “requested by” icon is the same as the chair conflict. Can these three be more distinct from each other?

- The “technology overlap” chain icon shows up really faintly and it’s very tiny, so it’s easy to miss. Same with the “key participant overlap” key icon — those two are really difficult to distinguish from each other when they are so small. Can these be made larger or even just changed to something that takes up more vertical space so they’re easier to distinguish?

</issue>
<code>
[start of ietf/meeting/templatetags/editor_tags.py]
1 # Copyright The IETF Trust 2022, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4 """Custom tags for the schedule editor"""
5 import debug # pyflakes: ignore
6
7 from django import template
8 from django.utils.html import format_html
9
10 register = template.Library()
11
12
13 @register.simple_tag
14 def constraint_icon_for(constraint_name, count=None):
15 # icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()
16 icons = {
17 'conflict': '<span class="encircled">{reversed}1</span>',
18 'conflic2': '<span class="encircled">{reversed}2</span>',
19 'conflic3': '<span class="encircled">{reversed}3</span>',
20 'bethere': '<i class="bi bi-person"></i>{count}',
21 'timerange': '<i class="bi bi-calendar"></i>',
22 'time_relation': 'Δ',
23 'wg_adjacent': '{reversed}<i class="bi bi-skip-end"></i>',
24 'chair_conflict': '{reversed}<i class="bi bi-person-circle"></i>',
25 'tech_overlap': '{reversed}<i class="bi bi-link"></i>',
26 'key_participant': '{reversed}<i class="bi bi-key"></i>',
27 'joint_with_groups': '<i class="bi bi-merge"></i>',
28 'responsible_ad': '<span class="encircled">AD</span>',
29 }
30 reversed_suffix = '-reversed'
31 if constraint_name.slug.endswith(reversed_suffix):
32 reversed = True
33 cn = constraint_name.slug[: -len(reversed_suffix)]
34 else:
35 reversed = False
36 cn = constraint_name.slug
37 return format_html(
38 icons[cn],
39 count=count or '',
40 reversed='-' if reversed else '',
41 )
42
[end of ietf/meeting/templatetags/editor_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/meeting/templatetags/editor_tags.py b/ietf/meeting/templatetags/editor_tags.py
--- a/ietf/meeting/templatetags/editor_tags.py
+++ b/ietf/meeting/templatetags/editor_tags.py
@@ -17,13 +17,13 @@
'conflict': '<span class="encircled">{reversed}1</span>',
'conflic2': '<span class="encircled">{reversed}2</span>',
'conflic3': '<span class="encircled">{reversed}3</span>',
- 'bethere': '<i class="bi bi-person"></i>{count}',
+ 'bethere': '<i class="bi bi-people-fill"></i>{count}',
'timerange': '<i class="bi bi-calendar"></i>',
'time_relation': 'Δ',
'wg_adjacent': '{reversed}<i class="bi bi-skip-end"></i>',
- 'chair_conflict': '{reversed}<i class="bi bi-person-circle"></i>',
- 'tech_overlap': '{reversed}<i class="bi bi-link"></i>',
- 'key_participant': '{reversed}<i class="bi bi-key"></i>',
+ 'chair_conflict': '{reversed}<i class="bi bi-circle-fill"></i>',
+ 'tech_overlap': '{reversed}<i class="bi bi-link-45deg"></i>',
+ 'key_participant': '{reversed}<i class="bi bi-star"></i>',
'joint_with_groups': '<i class="bi bi-merge"></i>',
'responsible_ad': '<span class="encircled">AD</span>',
}
| {"golden_diff": "diff --git a/ietf/meeting/templatetags/editor_tags.py b/ietf/meeting/templatetags/editor_tags.py\n--- a/ietf/meeting/templatetags/editor_tags.py\n+++ b/ietf/meeting/templatetags/editor_tags.py\n@@ -17,13 +17,13 @@\n 'conflict': '<span class=\"encircled\">{reversed}1</span>',\n 'conflic2': '<span class=\"encircled\">{reversed}2</span>',\n 'conflic3': '<span class=\"encircled\">{reversed}3</span>',\n- 'bethere': '<i class=\"bi bi-person\"></i>{count}',\n+ 'bethere': '<i class=\"bi bi-people-fill\"></i>{count}',\n 'timerange': '<i class=\"bi bi-calendar\"></i>',\n 'time_relation': 'Δ',\n 'wg_adjacent': '{reversed}<i class=\"bi bi-skip-end\"></i>',\n- 'chair_conflict': '{reversed}<i class=\"bi bi-person-circle\"></i>',\n- 'tech_overlap': '{reversed}<i class=\"bi bi-link\"></i>',\n- 'key_participant': '{reversed}<i class=\"bi bi-key\"></i>',\n+ 'chair_conflict': '{reversed}<i class=\"bi bi-circle-fill\"></i>',\n+ 'tech_overlap': '{reversed}<i class=\"bi bi-link-45deg\"></i>',\n+ 'key_participant': '{reversed}<i class=\"bi bi-star\"></i>',\n 'joint_with_groups': '<i class=\"bi bi-merge\"></i>',\n 'responsible_ad': '<span class=\"encircled\">AD</span>',\n }\n", "issue": "Schedule editor icons need to be more distinct\nFrom @flynnliz\r\n\r\nThe various \u201cperson\u201d icons are confusing. It\u2019s hard to know at a glance in the grid which conflicts are \u201cperson who must be present\u201d and which are \u201cchair conflict,\u201d and it\u2019s even more confusing that in the session request data box on the bottom right, the \u201crequested by\u201d icon is the same as the chair conflict. Can these three be more distinct from each other? \r\n\r\n\r\n\r\n\r\n- The \u201ctechnology overlap\u201d chain icon shows up really faintly and it\u2019s very tiny, so it\u2019s easy to miss. Same with the \u201ckey participant overlap\u201d key icon \u2014 those two are really difficult to distinguish from each other when they are so small. 
Can these be made larger or even just changed to something that takes up more vertical space so they\u2019re easier to distinguish?\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\"\"\"Custom tags for the schedule editor\"\"\"\nimport debug # pyflakes: ignore\n\nfrom django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\n\n\[email protected]_tag\ndef constraint_icon_for(constraint_name, count=None):\n # icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()\n icons = {\n 'conflict': '<span class=\"encircled\">{reversed}1</span>',\n 'conflic2': '<span class=\"encircled\">{reversed}2</span>',\n 'conflic3': '<span class=\"encircled\">{reversed}3</span>',\n 'bethere': '<i class=\"bi bi-person\"></i>{count}',\n 'timerange': '<i class=\"bi bi-calendar\"></i>',\n 'time_relation': 'Δ',\n 'wg_adjacent': '{reversed}<i class=\"bi bi-skip-end\"></i>',\n 'chair_conflict': '{reversed}<i class=\"bi bi-person-circle\"></i>',\n 'tech_overlap': '{reversed}<i class=\"bi bi-link\"></i>',\n 'key_participant': '{reversed}<i class=\"bi bi-key\"></i>',\n 'joint_with_groups': '<i class=\"bi bi-merge\"></i>',\n 'responsible_ad': '<span class=\"encircled\">AD</span>',\n }\n reversed_suffix = '-reversed'\n if constraint_name.slug.endswith(reversed_suffix):\n reversed = True\n cn = constraint_name.slug[: -len(reversed_suffix)]\n else:\n reversed = False\n cn = constraint_name.slug\n return format_html(\n icons[cn],\n count=count or '',\n reversed='-' if reversed else '',\n )\n", "path": "ietf/meeting/templatetags/editor_tags.py"}]} | 1,328 | 380 |
gh_patches_debug_23481 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-900 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename "bundle" to "distribution"?
**Is your feature request related to a problem? Please describe.**
We've been calling our output a bundle, but it's really a distribution.
**Describe the solution you'd like**
Rename bundle to distribution everywhere.
</issue>
<code>
[start of src/assemble_workflow/bundle_recorder.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8 from urllib.parse import urljoin
9
10 from manifests.bundle_manifest import BundleManifest
11
12
13 class BundleRecorder:
14 def __init__(self, build, output_dir, artifacts_dir, base_url):
15 self.output_dir = output_dir
16 self.build_id = build.id
17 self.base_url = base_url
18 self.version = build.version
19 self.package_name = self.__get_package_name(build)
20 self.artifacts_dir = artifacts_dir
21 self.architecture = build.architecture
22 self.bundle_manifest = self.BundleManifestBuilder(
23 build.id,
24 build.name,
25 build.version,
26 build.platform,
27 build.architecture,
28 self.__get_package_location(),
29 )
30
31 def __get_package_name(self, build):
32 parts = [
33 build.name.lower().replace(" ", "-"),
34 build.version,
35 build.platform,
36 build.architecture,
37 ]
38 return "-".join(parts) + (".zip" if build.platform == "windows" else ".tar.gz")
39
40 def __get_public_url_path(self, folder, rel_path):
41 path = "/".join((folder, rel_path))
42 return urljoin(self.base_url + "/", path)
43
44 def __get_location(self, folder_name, rel_path, abs_path):
45 if self.base_url:
46 return self.__get_public_url_path(folder_name, rel_path)
47 return abs_path
48
49 # Assembled bundles are expected to be served from a separate "bundles" folder
50 # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
51 def __get_package_location(self):
52 return self.__get_location("dist", self.package_name, os.path.join(self.output_dir, self.package_name))
53
54 # Build artifacts are expected to be served from a "builds" folder
55 # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
56 def __get_component_location(self, component_rel_path):
57 abs_path = os.path.join(self.artifacts_dir, component_rel_path)
58 return self.__get_location("builds", component_rel_path, abs_path)
59
60 def record_component(self, component, rel_path):
61 self.bundle_manifest.append_component(
62 component.name,
63 component.repository,
64 component.ref,
65 component.commit_id,
66 self.__get_component_location(rel_path),
67 )
68
69 def get_manifest(self):
70 return self.bundle_manifest.to_manifest()
71
72 def write_manifest(self, folder):
73 manifest_path = os.path.join(folder, "manifest.yml")
74 self.get_manifest().to_file(manifest_path)
75
76 class BundleManifestBuilder:
77 def __init__(self, build_id, name, version, platform, architecture, location):
78 self.data = {}
79 self.data["build"] = {}
80 self.data["build"]["id"] = build_id
81 self.data["build"]["name"] = name
82 self.data["build"]["version"] = str(version)
83 self.data["build"]["platform"] = platform
84 self.data["build"]["architecture"] = architecture
85 self.data["build"]["location"] = location
86 self.data["schema-version"] = "1.1"
87 # We need to store components as a hash so that we can append artifacts by component name
88 # When we convert to a BundleManifest this will get converted back into a list
89 self.data["components"] = []
90
91 def append_component(self, name, repository_url, ref, commit_id, location):
92 component = {
93 "name": name,
94 "repository": repository_url,
95 "ref": ref,
96 "commit_id": commit_id,
97 "location": location,
98 }
99 self.data["components"].append(component)
100
101 def to_manifest(self):
102 return BundleManifest(self.data)
103
[end of src/assemble_workflow/bundle_recorder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py
--- a/src/assemble_workflow/bundle_recorder.py
+++ b/src/assemble_workflow/bundle_recorder.py
@@ -46,13 +46,13 @@
return self.__get_public_url_path(folder_name, rel_path)
return abs_path
- # Assembled bundles are expected to be served from a separate "bundles" folder
- # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
+ # Assembled output are expected to be served from a separate "dist" folder
+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/
def __get_package_location(self):
return self.__get_location("dist", self.package_name, os.path.join(self.output_dir, self.package_name))
# Build artifacts are expected to be served from a "builds" folder
- # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/
def __get_component_location(self, component_rel_path):
abs_path = os.path.join(self.artifacts_dir, component_rel_path)
return self.__get_location("builds", component_rel_path, abs_path)
| {"golden_diff": "diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py\n--- a/src/assemble_workflow/bundle_recorder.py\n+++ b/src/assemble_workflow/bundle_recorder.py\n@@ -46,13 +46,13 @@\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n \n- # Assembled bundles are expected to be served from a separate \"bundles\" folder\n- # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n+ # Assembled output are expected to be served from a separate \"dist\" folder\n+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/\n def __get_package_location(self):\n return self.__get_location(\"dist\", self.package_name, os.path.join(self.output_dir, self.package_name))\n \n # Build artifacts are expected to be served from a \"builds\" folder\n- # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n", "issue": "Rename \"bundle\" to \"distribution\"?\n**Is your feature request related to a problem? Please describe.**\r\nWe've been calling our output a bundle, but it's really a distribution.\r\n\r\n**Describe the solution you'd like**\r\nRename bundle to distribution everywhere.\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir, base_url):\n self.output_dir = output_dir\n self.build_id = build.id\n self.base_url = base_url\n self.version = build.version\n self.package_name = self.__get_package_name(build)\n self.artifacts_dir = artifacts_dir\n self.architecture = build.architecture\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.platform,\n build.architecture,\n self.__get_package_location(),\n )\n\n def __get_package_name(self, build):\n parts = [\n build.name.lower().replace(\" \", \"-\"),\n build.version,\n build.platform,\n build.architecture,\n ]\n return \"-\".join(parts) + (\".zip\" if build.platform == \"windows\" else \".tar.gz\")\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"/\".join((folder, rel_path))\n return urljoin(self.base_url + \"/\", path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.base_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_package_location(self):\n return self.__get_location(\"dist\", self.package_name, os.path.join(self.output_dir, self.package_name))\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n\n def 
record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, platform, architecture, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"platform\"] = platform\n self.data[\"build\"][\"architecture\"] = architecture\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.1\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": "src/assemble_workflow/bundle_recorder.py"}]} | 1,638 | 332 |
gh_patches_debug_8433 | rasdani/github-patches | git_diff | weecology__retriever-1350 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Post release bump of version to 2.4.1-dev
</issue>
<code>
[start of setup.py]
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import re
6 import platform
7
8 from pkg_resources import parse_version
9 from setuptools import setup, find_packages
10
11 current_platform = platform.system().lower()
12 extra_includes = []
13 if current_platform == "windows":
14 extra_includes += ["pypyodbc"]
15
16 if os.path.exists(".git/hooks"): # check if we are in git repo
17 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
18 os.system("chmod +x .git/hooks/pre-commit")
19
20 app_data = "~/.retriever/scripts"
21 if os.path.exists(app_data):
22 os.system("rm -r {}".format(app_data))
23
24 __version__ = 'v2.4.0'
25 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
26 version_file.write("__version__ = " + "'" + __version__ + "'\n")
27 version_file.close()
28
29
30 def clean_version(v):
31 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
32
33
34 def read(*names, **kwargs):
35 return open(
36 os.path.join(os.path.dirname(__file__), *names),
37 ).read()
38
39 includes = [
40 'xlrd',
41 'future',
42 'argcomplete',
43 'pymysql',
44 'psycopg2-binary',
45 'sqlite3',
46 ] + extra_includes
47
48 excludes = [
49 'pyreadline',
50 'doctest',
51 'pickle',
52 'pdb',
53 'pywin', 'pywin.debugger',
54 'pywin.debugger.dbgcon',
55 'pywin.dialogs', 'pywin.dialogs.list',
56 'Tkconstants', 'Tkinter', 'tcl', 'tk'
57 ]
58
59 setup(
60 name='retriever',
61 version=clean_version(__version__),
62 description='Data Retriever',
63 long_description='{a}'.format(a=read('README.md')),
64 long_description_content_type='text/markdown',
65 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
66 author_email='[email protected]',
67 url='https://github.com/weecology/retriever',
68 classifiers=[
69 'Intended Audience :: Science/Research',
70 'License :: OSI Approved :: MIT License',
71 'Operating System :: Microsoft :: Windows',
72 'Operating System :: POSIX',
73 'Operating System :: Unix',
74 'Programming Language :: Python',
75 'Programming Language :: Python :: 3',
76 'Programming Language :: Python :: 3.4',
77 'Programming Language :: Python :: 3.5',
78 'Programming Language :: Python :: 3.6',
79 'Programming Language :: Python :: 3.7',
80 'Programming Language :: Python :: Implementation :: PyPy',
81 'Topic :: Software Development :: Libraries :: Python Modules',
82 'Topic :: Scientific/Engineering :: GIS',
83 'Topic :: Scientific/Engineering :: Information Analysis',
84 'Topic :: Database',
85 ],
86 packages=find_packages(
87 exclude=['hooks',
88 'docs',
89 'tests',
90 'scripts',
91 'docker',
92 ".cache"]),
93 entry_points={
94 'console_scripts': [
95 'retriever = retriever.__main__:main',
96 ],
97 },
98 install_requires=[
99 'xlrd',
100 'future',
101 'argcomplete',
102 'tqdm',
103 'requests',
104 'pandas'
105 ],
106 data_files=[('', ['CITATION'])],
107 setup_requires=[],
108 )
109
110 # windows doesn't have bash. No point in using bash-completion
111 if current_platform != "windows":
112 # if platform is OS X use "~/.bash_profile"
113 if current_platform == "darwin":
114 bash_file = "~/.bash_profile"
115 # if platform is Linux use "~/.bashrc
116 elif current_platform == "linux":
117 bash_file = "~/.bashrc"
118 # else write and discard
119 else:
120 bash_file = "/dev/null"
121
122 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
123 with open(os.path.expanduser(bash_file), "a+") as bashrc:
124 bashrc.seek(0)
125 # register retriever for arg-completion if not already registered
126 # whenever a new shell is spawned
127 if argcomplete_command not in bashrc.read():
128 bashrc.write(argcomplete_command + "\n")
129 bashrc.close()
130 os.system("activate-global-python-argcomplete")
131 # register for the current shell
132 os.system(argcomplete_command)
133
134 try:
135 from retriever.compile import compile
136 from retriever.lib.repository import check_for_updates
137
138 check_for_updates()
139 compile()
140 except:
141 pass
142
[end of setup.py]
[start of retriever/_version.py]
1 __version__ = 'v2.4.0'
2
[end of retriever/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/retriever/_version.py b/retriever/_version.py
--- a/retriever/_version.py
+++ b/retriever/_version.py
@@ -1 +1 @@
-__version__ = 'v2.4.0'
+__version__ = 'v2.4.1.dev'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
if os.path.exists(app_data):
os.system("rm -r {}".format(app_data))
-__version__ = 'v2.4.0'
+__version__ = 'v2.4.1.dev'
with open(os.path.join("retriever", "_version.py"), "w") as version_file:
version_file.write("__version__ = " + "'" + __version__ + "'\n")
version_file.close()
| {"golden_diff": "diff --git a/retriever/_version.py b/retriever/_version.py\n--- a/retriever/_version.py\n+++ b/retriever/_version.py\n@@ -1 +1 @@\n-__version__ = 'v2.4.0'\n+__version__ = 'v2.4.1.dev'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n if os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n \n-__version__ = 'v2.4.0'\n+__version__ = 'v2.4.1.dev'\n with open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n", "issue": "Post release bump of version to 2.4.1-dev\n\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.4.0'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\n\ndef read(*names, **kwargs):\n return open(\n os.path.join(os.path.dirname(__file__), *names),\n ).read()\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2-binary',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(\n name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n long_description='{a}'.format(a=read('README.md')),\n long_description_content_type='text/markdown',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Database',\n ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm',\n 'requests',\n 'pandas'\n ],\n data_files=[('', ['CITATION'])],\n 
setup_requires=[],\n)\n\n# windows doesn't have bash. No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates()\n compile()\nexcept:\n pass\n", "path": "setup.py"}, {"content": "__version__ = 'v2.4.0'\n", "path": "retriever/_version.py"}]} | 1,915 | 195 |
gh_patches_debug_3437 | rasdani/github-patches | git_diff | vacanza__python-holidays-794 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DeprecationWarning upon "import holidays" in version 0.17
The implementation of deprecating the Swaziland calendar contains a bug. Just importing the holidays package is enough to fire the `DeprecationWarning`.
**Steps to reproduce (in bash):**
```bash
# Setup
python -m venv demo
source demo/bin/activate
pip install --upgrade pip
# Bad version
pip install holidays==0.17
# Expose bug
python -W error::DeprecationWarning -c 'import holidays'
# Workoround
pip uninstall -y holidays
pip install holidays!=0.17
python -W error::DeprecationWarning -c 'import holidays'
# Cleanup
deactivate
rm -rf demo
```
**Expected behavior:**
The `DeprecationWarning` should only fire when the user constructs an instance of the `Swaziland` or a subclass.
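For illustration only (a toy module, not the actual `holidays` code), the difference comes down to where the `warnings.warn` call sits:

```python
import warnings


class Legacy:
    # Runs when the class statement is executed, i.e. as soon as the
    # defining module is imported -- hence the warning on plain import.
    warnings.warn("Legacy is deprecated", DeprecationWarning)


class LegacyFixed:
    def __init__(self):
        # Runs only when an instance is actually constructed.
        warnings.warn("LegacyFixed is deprecated", DeprecationWarning)
```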
</issue>
<code>
[start of holidays/countries/eswatini.py]
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 import warnings
13 from datetime import date
14
15 from dateutil.easter import easter
16 from dateutil.relativedelta import relativedelta as rd
17
18 from holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC
19 from holidays.holiday_base import HolidayBase
20
21
22 class Eswatini(HolidayBase):
23 """
24 https://swazilii.org/sz/legislation/act/1938/71
25 https://www.officeholidays.com/countries/swaziland
26 """
27
28 country = "SZ"
29
30 def _populate(self, year):
31 super()._populate(year)
32
33 # Observed since 1938
34 if year > 1938:
35 self[date(year, JAN, 1)] = "New Year's Day"
36
37 e = easter(year)
38 good_friday = e - rd(days=2)
39 easter_monday = e + rd(days=1)
40 ascension_day = e + rd(days=39)
41 self[good_friday] = "Good Friday"
42 self[easter_monday] = "Easter Monday"
43 self[ascension_day] = "Ascension Day"
44
45 if year > 1968:
46 self[date(year, APR, 25)] = "National Flag Day"
47
48 if year > 1982:
49 # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza
50 self[date(year, JUL, 22)] = "Birthday of Late King Sobhuza"
51
52 if year > 1986:
53 # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii
54 self[date(year, APR, 19)] = "King's Birthday"
55
56 self[date(year, MAY, 1)] = "Worker's Day"
57 self[date(year, SEP, 6)] = "Independence Day"
58 self[date(year, DEC, 25)] = "Christmas Day"
59 self[date(year, DEC, 26)] = "Boxing Day"
60
61 # Once-off public holidays
62 y2k = "Y2K changeover"
63
64 if year == 1999:
65 # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/
66 self[date(1999, DEC, 31)] = y2k
67 if year == 2000:
68 self[date(2000, JAN, 3)] = y2k
69
70 # As of 2021/1/1, whenever a public holiday falls on a
71 # Sunday
72 # it rolls over to the following Monday
73 for k, v in list(self.items()):
74
75 if self.observed and k.weekday() == SUN and k.year == year:
76 add_days = 1
77 while self.get(k + rd(days=add_days)) is not None:
78 add_days += 1
79 self[k + rd(days=add_days)] = v + " (Day Off)"
80
81
82 class Swaziland(Eswatini):
83 warnings.warn(
84 "Swaziland is deprecated, use Eswatini instead.",
85 DeprecationWarning,
86 )
87 pass
88
89
90 class SZ(Eswatini):
91 pass
92
93
94 class SZW(Eswatini):
95 pass
96
[end of holidays/countries/eswatini.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py
--- a/holidays/countries/eswatini.py
+++ b/holidays/countries/eswatini.py
@@ -80,11 +80,13 @@
class Swaziland(Eswatini):
- warnings.warn(
- "Swaziland is deprecated, use Eswatini instead.",
- DeprecationWarning,
- )
- pass
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "Swaziland is deprecated, use Eswatini instead.",
+ DeprecationWarning,
+ )
+
+ super().__init__(*args, **kwargs)
class SZ(Eswatini):
| {"golden_diff": "diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py\n--- a/holidays/countries/eswatini.py\n+++ b/holidays/countries/eswatini.py\n@@ -80,11 +80,13 @@\n \n \n class Swaziland(Eswatini):\n- warnings.warn(\n- \"Swaziland is deprecated, use Eswatini instead.\",\n- DeprecationWarning,\n- )\n- pass\n+ def __init__(self, *args, **kwargs) -> None:\n+ warnings.warn(\n+ \"Swaziland is deprecated, use Eswatini instead.\",\n+ DeprecationWarning,\n+ )\n+\n+ super().__init__(*args, **kwargs)\n \n \n class SZ(Eswatini):\n", "issue": "DeprecationWarning upon \"import holidays\" in version 0.17\nThe implementation of deprecating the Swaziland calendar contains a bug. Just importing the holidays package is enough to fire the `DeprecationWarning`.\r\n\r\n**Steps to reproduce (in bash):**\r\n\r\n```bash\r\n# Setup\r\npython -m venv demo\r\nsource demo/bin/activate\r\npip install --upgrade pip\r\n\r\n# Bad version\r\npip install holidays==0.17\r\n\r\n# Expose bug\r\npython -W error::DeprecationWarning -c 'import holidays'\r\n\r\n# Workoround\r\npip uninstall -y holidays\r\npip install holidays!=0.17\r\npython -W error::DeprecationWarning -c 'import holidays'\r\n\r\n# Cleanup\r\ndeactivate\r\nrm -rf demo\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nThe `DeprecationWarning` should only fire when the user constructs an instance of the `Swaziland` or a subclass.\r\n\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 
2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n pass\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}]} | 1,820 | 172 |
gh_patches_debug_32462 | rasdani/github-patches | git_diff | getsentry__sentry-59557 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate the ProjectCombinedRuleIndexEndpoint
[GCP API access logs](https://console.cloud.google.com/logs/query;query=resource.type%20%3D%20k8s_container%0Aresource.labels.namespace_name%20%3D%20default%0Aresource.labels.container_name%20%3D%20sentry%0Alabels.name%20%3D%20sentry.access.api%0AjsonPayload.view%3D~%22ProjectCombinedRuleIndexEndpoint%22;summaryFields=:true:32:beginning;lfeCustomFields=jsonPayload%252Fview,jsonPayload%252Forganization_id;cursorTimestamp=2023-09-06T18:29:05.855473577Z;startTime=2023-09-06T16:51:17.461Z;endTime=2023-09-06T23:51:17.461482Z?project=internal-sentry) show that it's not used by us, and only by 2 customers. It's an undocumented endpoint so we can [set the deprecation header](https://www.notion.so/sentry/Sentry-API-Deprecation-Policy-ccbdea15a34c4fdeb50985685adc3368) and get rid of it.
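As a rough sketch of what announcing the deprecation in responses can look like (generic Django/DRF-style code, not Sentry's actual helper; the header values and date are placeholders):

```python
from rest_framework.response import Response


def add_deprecation_headers(response: Response) -> Response:
    # Advertise the deprecation and a planned removal date to API clients.
    response["Deprecation"] = "true"
    response["Sunset"] = "Sat, 01 Jun 2024 00:00:00 GMT"
    return response
```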
Related to https://github.com/getsentry/sentry/issues/54005
</issue>
<code>
[start of src/sentry/incidents/endpoints/project_alert_rule_index.py]
1 from __future__ import annotations
2
3 from rest_framework.request import Request
4 from rest_framework.response import Response
5
6 from sentry import features
7 from sentry.api.api_publish_status import ApiPublishStatus
8 from sentry.api.base import region_silo_endpoint
9 from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
10 from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator
11 from sentry.api.serializers import CombinedRuleSerializer, serialize
12 from sentry.constants import ObjectStatus
13 from sentry.incidents.endpoints.organization_alert_rule_index import AlertRuleIndexMixin
14 from sentry.incidents.models import AlertRule
15 from sentry.models.rule import Rule
16 from sentry.snuba.dataset import Dataset
17
18
19 @region_silo_endpoint
20 class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):
21 publish_status = {
22 "GET": ApiPublishStatus.UNKNOWN,
23 }
24
25 def get(self, request: Request, project) -> Response:
26 """
27 Fetches alert rules and legacy rules for a project
28 """
29 alert_rules = AlertRule.objects.fetch_for_project(project)
30 if not features.has("organizations:performance-view", project.organization):
31 # Filter to only error alert rules
32 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
33
34 alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, ["date_added"])
35 rule_intermediary = CombinedQuerysetIntermediary(
36 Rule.objects.filter(
37 project=project,
38 status=ObjectStatus.ACTIVE,
39 ),
40 ["date_added"],
41 )
42
43 return self.paginate(
44 request,
45 paginator_cls=CombinedQuerysetPaginator,
46 on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer()),
47 default_per_page=25,
48 intermediaries=[alert_rule_intermediary, rule_intermediary],
49 desc=True,
50 )
51
52
53 @region_silo_endpoint
54 class ProjectAlertRuleIndexEndpoint(ProjectEndpoint, AlertRuleIndexMixin):
55 publish_status = {
56 "GET": ApiPublishStatus.UNKNOWN,
57 "POST": ApiPublishStatus.UNKNOWN,
58 }
59 permission_classes = (ProjectAlertRulePermission,)
60
61 def get(self, request: Request, project) -> Response:
62 """
63 Fetches metric alert rules for a project - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.
64 """
65 return self.fetch_metric_alert(request, project.organization, project)
66
67 def post(self, request: Request, project) -> Response:
68 """
69 Create an alert rule - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.
70 """
71 return self.create_metric_alert(request, project.organization, project)
72
[end of src/sentry/incidents/endpoints/project_alert_rule_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/incidents/endpoints/project_alert_rule_index.py b/src/sentry/incidents/endpoints/project_alert_rule_index.py
--- a/src/sentry/incidents/endpoints/project_alert_rule_index.py
+++ b/src/sentry/incidents/endpoints/project_alert_rule_index.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+from datetime import datetime
+
from rest_framework.request import Request
from rest_framework.response import Response
@@ -7,6 +9,7 @@
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
+from sentry.api.helpers.deprecation import deprecated
from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator
from sentry.api.serializers import CombinedRuleSerializer, serialize
from sentry.constants import ObjectStatus
@@ -18,13 +21,15 @@
@region_silo_endpoint
class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):
+ DEPRECATION_DATE = datetime.fromisoformat("2024-02-07T00:00:00+00:00:00")
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
+ @deprecated(DEPRECATION_DATE, "sentry-api-0-organization-combined-rules")
def get(self, request: Request, project) -> Response:
"""
- Fetches alert rules and legacy rules for a project
+ Fetches alert rules and legacy rules for a project. @deprecated. Use OrganizationCombinedRuleIndexEndpoint instead.
"""
alert_rules = AlertRule.objects.fetch_for_project(project)
if not features.has("organizations:performance-view", project.organization):
| {"golden_diff": "diff --git a/src/sentry/incidents/endpoints/project_alert_rule_index.py b/src/sentry/incidents/endpoints/project_alert_rule_index.py\n--- a/src/sentry/incidents/endpoints/project_alert_rule_index.py\n+++ b/src/sentry/incidents/endpoints/project_alert_rule_index.py\n@@ -1,5 +1,7 @@\n from __future__ import annotations\n \n+from datetime import datetime\n+\n from rest_framework.request import Request\n from rest_framework.response import Response\n \n@@ -7,6 +9,7 @@\n from sentry.api.api_publish_status import ApiPublishStatus\n from sentry.api.base import region_silo_endpoint\n from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\n+from sentry.api.helpers.deprecation import deprecated\n from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator\n from sentry.api.serializers import CombinedRuleSerializer, serialize\n from sentry.constants import ObjectStatus\n@@ -18,13 +21,15 @@\n \n @region_silo_endpoint\n class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):\n+ DEPRECATION_DATE = datetime.fromisoformat(\"2024-02-07T00:00:00+00:00:00\")\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n \n+ @deprecated(DEPRECATION_DATE, \"sentry-api-0-organization-combined-rules\")\n def get(self, request: Request, project) -> Response:\n \"\"\"\n- Fetches alert rules and legacy rules for a project\n+ Fetches alert rules and legacy rules for a project. @deprecated. Use OrganizationCombinedRuleIndexEndpoint instead.\n \"\"\"\n alert_rules = AlertRule.objects.fetch_for_project(project)\n if not features.has(\"organizations:performance-view\", project.organization):\n", "issue": "Deprecate the ProjectCombinedRuleIndexEndpoint\n[GCP API access logs](https://console.cloud.google.com/logs/query;query=resource.type%20%3D%20k8s_container%0Aresource.labels.namespace_name%20%3D%20default%0Aresource.labels.container_name%20%3D%20sentry%0Alabels.name%20%3D%20sentry.access.api%0AjsonPayload.view%3D~%22ProjectCombinedRuleIndexEndpoint%22;summaryFields=:true:32:beginning;lfeCustomFields=jsonPayload%252Fview,jsonPayload%252Forganization_id;cursorTimestamp=2023-09-06T18:29:05.855473577Z;startTime=2023-09-06T16:51:17.461Z;endTime=2023-09-06T23:51:17.461482Z?project=internal-sentry) show that it's not used by us, and only by 2 customers. It's an undocumented endpoint so we can [set the deprecation header](https://www.notion.so/sentry/Sentry-API-Deprecation-Policy-ccbdea15a34c4fdeb50985685adc3368) and get rid of it. 
\n\nRelated to https://github.com/getsentry/sentry/issues/54005\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.api_publish_status import ApiPublishStatus\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator\nfrom sentry.api.serializers import CombinedRuleSerializer, serialize\nfrom sentry.constants import ObjectStatus\nfrom sentry.incidents.endpoints.organization_alert_rule_index import AlertRuleIndexMixin\nfrom sentry.incidents.models import AlertRule\nfrom sentry.models.rule import Rule\nfrom sentry.snuba.dataset import Dataset\n\n\n@region_silo_endpoint\nclass ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for a project\n \"\"\"\n alert_rules = AlertRule.objects.fetch_for_project(project)\n if not features.has(\"organizations:performance-view\", project.organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, [\"date_added\"])\n rule_intermediary = CombinedQuerysetIntermediary(\n Rule.objects.filter(\n project=project,\n status=ObjectStatus.ACTIVE,\n ),\n [\"date_added\"],\n )\n\n return self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer()),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=True,\n )\n\n\n@region_silo_endpoint\nclass ProjectAlertRuleIndexEndpoint(ProjectEndpoint, AlertRuleIndexMixin):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n \"POST\": ApiPublishStatus.UNKNOWN,\n }\n permission_classes = (ProjectAlertRulePermission,)\n\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches metric alert rules for a project - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.fetch_metric_alert(request, project.organization, project)\n\n def post(self, request: Request, project) -> Response:\n \"\"\"\n Create an alert rule - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.create_metric_alert(request, project.organization, project)\n", "path": "src/sentry/incidents/endpoints/project_alert_rule_index.py"}]} | 1,582 | 390 |
gh_patches_debug_20394 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1522 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Collection events for sbazv_de double in calendar
### I Have A Problem With:
A specific source
### What's Your Problem
Collection dates and types are pulled correctly into the calendar but events show up twice per day. Sensor for next collection ('AbfallTermin') reads 'Gelber Sack, Gelber Sack in 7 Tagen'. Config and logs look ok.

### Source (if relevant)
sbazv_de
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: sbazv_de
args:
city: Schönefeld
district: Großziethen
street: Kxxxxxxxx
customize:
- type: Restmülltonnen
alias: Restmuell
icon: mdi:trash-can
- type: Gelbe Säcke
alias: GelberSack
icon: mdi:recycle
- type: Papiertonnen
alias: Altpapier
icon: mdi:file-document
- type: Laubsäcke
alias: Laubsack
icon: mdi:trash-can
use_dedicated_calendar: false
dedicated_calendar_title: SBAZV
fetch_time: "04:00"
day_switch_time: "12:00"
sensor:
# ------- Waste Collection Schedule -------
# Nächster Abholtermin
- platform: waste_collection_schedule
name: "AbfallTermin"
value_template: '{{value.types|join(", ")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py]
1 import requests
2 from waste_collection_schedule import Collection # type: ignore[attr-defined]
3 from waste_collection_schedule.service.ICS import ICS
4
5 TITLE = "Südbrandenburgischer Abfallzweckverband"
6 DESCRIPTION = "SBAZV Brandenburg, Deutschland"
7 URL = "https://www.sbazv.de"
8 TEST_CASES = {
9 "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."}
10 }
11
12 ICON_MAP = {
13 "Restmülltonnen": "mdi:trash-can",
14 "Laubsäcke": "mdi:leaf",
15 "Gelbe Säcke": "mdi:sack",
16 "Papiertonnen": "mdi:package-variant",
17 "Weihnachtsbäume": "mdi:pine-tree",
18 }
19
20 # _LOGGER = logging.getLogger(__name__)
21
22
23 class Source:
24 def __init__(self, city, district, street=None):
25 self._city = city
26 self._district = district
27 self._street = street
28 self._ics = ICS()
29
30 def fetch(self):
31 args = {
32 "city": self._city,
33 "district": self._district,
34 "street": self._street,
35 }
36
37 # get ics file
38 # https://www.sbazv.de/entsorgungstermine/klein.ics?city=Wildau&district=Wildau&street=Miersdorfer+Str.
39 r = requests.get(
40 "https://www.sbazv.de/entsorgungstermine/klein.ics", params=args
41 )
42
43 # parse ics file
44 dates = self._ics.convert(r.text)
45
46 entries = []
47 for d in dates:
48 waste_type = d[1].strip()
49 next_pickup_date = d[0]
50
51 entries.append(
52 Collection(
53 date=next_pickup_date,
54 t=waste_type,
55 icon=ICON_MAP.get(waste_type),
56 )
57 )
58
59 return entries
60
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
@@ -6,7 +6,12 @@
DESCRIPTION = "SBAZV Brandenburg, Deutschland"
URL = "https://www.sbazv.de"
TEST_CASES = {
- "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."}
+ "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."},
+ "Schönefeld": {
+ "city": "Schönefeld",
+ "district": "Großziethen",
+ "street": "kleistring",
+ },
}
ICON_MAP = {
@@ -47,7 +52,11 @@
for d in dates:
waste_type = d[1].strip()
next_pickup_date = d[0]
-
+ # remove duplicates
+ if any(
+ e.date == next_pickup_date and e.type == waste_type for e in entries
+ ):
+ continue
entries.append(
Collection(
date=next_pickup_date,
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n@@ -6,7 +6,12 @@\n DESCRIPTION = \"SBAZV Brandenburg, Deutschland\"\n URL = \"https://www.sbazv.de\"\n TEST_CASES = {\n- \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"}\n+ \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"},\n+ \"Sch\u00f6nefeld\": {\n+ \"city\": \"Sch\u00f6nefeld\",\n+ \"district\": \"Gro\u00dfziethen\",\n+ \"street\": \"kleistring\",\n+ },\n }\n \n ICON_MAP = {\n@@ -47,7 +52,11 @@\n for d in dates:\n waste_type = d[1].strip()\n next_pickup_date = d[0]\n-\n+ # remove duplicates\n+ if any(\n+ e.date == next_pickup_date and e.type == waste_type for e in entries\n+ ):\n+ continue\n entries.append(\n Collection(\n date=next_pickup_date,\n", "issue": "[Bug]: Collection events for sbazv_de double in calendar\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nCollection dates and types are pulled correctly into the calendar but events show up twice per day. Sensor for next collection ('AbfallTermin') reads 'Gelber Sack, Gelber Sack in 7 Tagen'. Config and logs look ok.\r\n\r\n\r\n\n\n### Source (if relevant)\n\nsbazv_de\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: sbazv_de\r\n args:\r\n city: Sch\u00f6nefeld\r\n district: Gro\u00dfziethen\r\n street: Kxxxxxxxx\r\n customize:\r\n - type: Restm\u00fclltonnen\r\n alias: Restmuell\r\n icon: mdi:trash-can \r\n - type: Gelbe S\u00e4cke\r\n alias: GelberSack\r\n icon: mdi:recycle\r\n - type: Papiertonnen\r\n alias: Altpapier\r\n icon: mdi:file-document\r\n - type: Laubs\u00e4cke\r\n alias: Laubsack\r\n icon: mdi:trash-can\r\n use_dedicated_calendar: false\r\n dedicated_calendar_title: SBAZV\r\n fetch_time: \"04:00\"\r\n day_switch_time: \"12:00\"\r\n\r\nsensor:\r\n # ------- Waste Collection Schedule ------- \r\n # N\u00e4chster Abholtermin\r\n - platform: waste_collection_schedule\r\n name: \"AbfallTermin\"\r\n value_template: '{{value.types|join(\", \")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", 
"before_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"S\u00fcdbrandenburgischer Abfallzweckverband\"\nDESCRIPTION = \"SBAZV Brandenburg, Deutschland\"\nURL = \"https://www.sbazv.de\"\nTEST_CASES = {\n \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"}\n}\n\nICON_MAP = {\n \"Restm\u00fclltonnen\": \"mdi:trash-can\",\n \"Laubs\u00e4cke\": \"mdi:leaf\",\n \"Gelbe S\u00e4cke\": \"mdi:sack\",\n \"Papiertonnen\": \"mdi:package-variant\",\n \"Weihnachtsb\u00e4ume\": \"mdi:pine-tree\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, city, district, street=None):\n self._city = city\n self._district = district\n self._street = street\n self._ics = ICS()\n\n def fetch(self):\n args = {\n \"city\": self._city,\n \"district\": self._district,\n \"street\": self._street,\n }\n\n # get ics file\n # https://www.sbazv.de/entsorgungstermine/klein.ics?city=Wildau&district=Wildau&street=Miersdorfer+Str.\n r = requests.get(\n \"https://www.sbazv.de/entsorgungstermine/klein.ics\", params=args\n )\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n waste_type = d[1].strip()\n next_pickup_date = d[0]\n\n entries.append(\n Collection(\n date=next_pickup_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py"}]} | 1,777 | 320 |
gh_patches_debug_57244 | rasdani/github-patches | git_diff | meltano__meltano-6333 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uvicorn Windows message points to wrong flag
```2022-06-30T19:52:16.704167Z [warning ] Add ff.start_uvicorn: True to your meltano.yml to supress this waring```
Should point to
ff.enable_uvicorn [env: MELTANO_FF_ENABLE_UVICORN] current value: False (default)
Should be an easy one!
</issue>
<code>
[start of src/meltano/api/workers/api_worker.py]
1 """Starts WSGI Webserver that will run the API App for a Meltano Project."""
2 import logging
3 import platform
4 import threading
5
6 from meltano.core.meltano_invoker import MeltanoInvoker
7 from meltano.core.project import Project
8 from meltano.core.project_settings_service import ProjectSettingsService
9 from meltano.core.settings_service import FeatureFlags
10 from meltano.core.utils.pidfile import PIDFile
11
12
13 class APIWorker(threading.Thread):
14 """The Base APIWorker Class."""
15
16 def __init__(self, project: Project, reload=False):
17 """Initialize the API Worker class with the project config.
18
19 Args:
20 project: Project class.
21 reload: Boolean.
22 """
23 super().__init__()
24
25 self.project = project
26 self.reload = reload
27 self.pid_file = PIDFile(self.project.run_dir("gunicorn.pid"))
28 self.settings_service = ProjectSettingsService(self.project.find())
29
30 def run(self):
31 """Run the initalized API Workers with the App Server requested."""
32 with self.settings_service.feature_flag(
33 FeatureFlags.ENABLE_UVICORN, raise_error=False
34 ) as allow:
35
36 enable_uvicorn = allow
37
38 # Use Uvicorn when on Windows
39 if platform.system() == "Windows":
40 if enable_uvicorn:
41 logging.debug("ff.enable_uvicorn enabled, starting uvicorn.")
42 else:
43 logging.warning(
44 "Windows OS detected auto setting ff.enable_uvicorn"
45 )
46 logging.warning(
47 "Add ff.start_uvicorn: True to your meltano.yml to supress this waring"
48 )
49 enable_uvicorn = True
50
51 # Start uvicorn to serve API and Ui
52 if enable_uvicorn:
53 settings_for_apiworker = self.settings_service
54
55 arg_bind_host = str(settings_for_apiworker.get("ui.bind_host"))
56 arg_bind_port = str(settings_for_apiworker.get("ui.bind_port"))
57 arg_loglevel = str(settings_for_apiworker.get("cli.log_level"))
58 arg_forwarded_allow_ips = str(
59 settings_for_apiworker.get("ui.forwarded_allow_ips")
60 )
61
62 # If windows and 127.0.0.1 only allowed changing bind host to accomidate
63 if platform.system() == "Windows":
64 if (
65 arg_forwarded_allow_ips == "127.0.0.1"
66 and arg_bind_host == "0.0.0.0" # noqa: S104
67 ):
68 # If left at 0.0.0.0 the server will respond to any request receieved on any interface
69 arg_bind_host = "127.0.0.1"
70
71 # Setup args for uvicorn using bind info from the project setings service
72 args = [
73 "--host",
74 arg_bind_host,
75 "--port",
76 arg_bind_port,
77 "--loop",
78 "asyncio",
79 "--interface",
80 "wsgi",
81 "--log-level",
82 arg_loglevel,
83 "--forwarded-allow-ips",
84 arg_forwarded_allow_ips,
85 "--timeout-keep-alive",
86 "600",
87 ]
88
89 # Add reload argument if reload is true
90 if self.reload:
91
92 args += [
93 "--reload",
94 ]
95
96 # Add the Meltano API app, factory create_app function combo to the args
97 args += [
98 "--factory",
99 "meltano.api.app:create_app",
100 ]
101
102 # Start uvicorn using the MeltanoInvoker
103 MeltanoInvoker(self.project).invoke(args, command="uvicorn")
104
105 else:
106 # Use Gunicorn when feature flag start_uvicorn is not set
107
108 args = [
109 "--config",
110 "python:meltano.api.wsgi",
111 "--pid",
112 str(self.pid_file),
113 ]
114
115 if self.reload:
116 args += ["--reload"]
117
118 args += ["meltano.api.app:create_app()"]
119
120 MeltanoInvoker(self.project).invoke(args, command="gunicorn")
121
122 def pid_path(self):
123 """Give the path name of the projects gunicorn.pid file location.
124
125 Returns:
126 Path object that gives the direct locationo of the gunicorn.pid file.
127 """
128 return self.project.run_dir("gunicorn.pid")
129
130 def stop(self):
131 """Terminnate active gunicorn workers that have placed a PID in the project's gunicorn.pid file."""
132 self.pid_file.process.terminate()
133
[end of src/meltano/api/workers/api_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/api/workers/api_worker.py b/src/meltano/api/workers/api_worker.py
--- a/src/meltano/api/workers/api_worker.py
+++ b/src/meltano/api/workers/api_worker.py
@@ -44,7 +44,7 @@
"Windows OS detected auto setting ff.enable_uvicorn"
)
logging.warning(
- "Add ff.start_uvicorn: True to your meltano.yml to supress this waring"
+ "Add ff.enable_uvicorn: True to your meltano.yml to supress this waring"
)
enable_uvicorn = True
| {"golden_diff": "diff --git a/src/meltano/api/workers/api_worker.py b/src/meltano/api/workers/api_worker.py\n--- a/src/meltano/api/workers/api_worker.py\n+++ b/src/meltano/api/workers/api_worker.py\n@@ -44,7 +44,7 @@\n \"Windows OS detected auto setting ff.enable_uvicorn\"\n )\n logging.warning(\n- \"Add ff.start_uvicorn: True to your meltano.yml to supress this waring\"\n+ \"Add ff.enable_uvicorn: True to your meltano.yml to supress this waring\"\n )\n enable_uvicorn = True\n", "issue": "Uvicorn Windows message points to wrong flag\n```2022-06-30T19:52:16.704167Z [warning ] Add ff.start_uvicorn: True to your meltano.yml to supress this waring```\r\n\r\nShould point to\r\n\r\nff.enable_uvicorn [env: MELTANO_FF_ENABLE_UVICORN] current value: False (default)\r\n\r\nShould be an easy one!\n", "before_files": [{"content": "\"\"\"Starts WSGI Webserver that will run the API App for a Meltano Project.\"\"\"\nimport logging\nimport platform\nimport threading\n\nfrom meltano.core.meltano_invoker import MeltanoInvoker\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.settings_service import FeatureFlags\nfrom meltano.core.utils.pidfile import PIDFile\n\n\nclass APIWorker(threading.Thread):\n \"\"\"The Base APIWorker Class.\"\"\"\n\n def __init__(self, project: Project, reload=False):\n \"\"\"Initialize the API Worker class with the project config.\n\n Args:\n project: Project class.\n reload: Boolean.\n \"\"\"\n super().__init__()\n\n self.project = project\n self.reload = reload\n self.pid_file = PIDFile(self.project.run_dir(\"gunicorn.pid\"))\n self.settings_service = ProjectSettingsService(self.project.find())\n\n def run(self):\n \"\"\"Run the initalized API Workers with the App Server requested.\"\"\"\n with self.settings_service.feature_flag(\n FeatureFlags.ENABLE_UVICORN, raise_error=False\n ) as allow:\n\n enable_uvicorn = allow\n\n # Use Uvicorn when on Windows\n if platform.system() == \"Windows\":\n if enable_uvicorn:\n logging.debug(\"ff.enable_uvicorn enabled, starting uvicorn.\")\n else:\n logging.warning(\n \"Windows OS detected auto setting ff.enable_uvicorn\"\n )\n logging.warning(\n \"Add ff.start_uvicorn: True to your meltano.yml to supress this waring\"\n )\n enable_uvicorn = True\n\n # Start uvicorn to serve API and Ui\n if enable_uvicorn:\n settings_for_apiworker = self.settings_service\n\n arg_bind_host = str(settings_for_apiworker.get(\"ui.bind_host\"))\n arg_bind_port = str(settings_for_apiworker.get(\"ui.bind_port\"))\n arg_loglevel = str(settings_for_apiworker.get(\"cli.log_level\"))\n arg_forwarded_allow_ips = str(\n settings_for_apiworker.get(\"ui.forwarded_allow_ips\")\n )\n\n # If windows and 127.0.0.1 only allowed changing bind host to accomidate\n if platform.system() == \"Windows\":\n if (\n arg_forwarded_allow_ips == \"127.0.0.1\"\n and arg_bind_host == \"0.0.0.0\" # noqa: S104\n ):\n # If left at 0.0.0.0 the server will respond to any request receieved on any interface\n arg_bind_host = \"127.0.0.1\"\n\n # Setup args for uvicorn using bind info from the project setings service\n args = [\n \"--host\",\n arg_bind_host,\n \"--port\",\n arg_bind_port,\n \"--loop\",\n \"asyncio\",\n \"--interface\",\n \"wsgi\",\n \"--log-level\",\n arg_loglevel,\n \"--forwarded-allow-ips\",\n arg_forwarded_allow_ips,\n \"--timeout-keep-alive\",\n \"600\",\n ]\n\n # Add reload argument if reload is true\n if self.reload:\n\n args += [\n \"--reload\",\n ]\n\n # Add the Meltano API app, factory 
create_app function combo to the args\n args += [\n \"--factory\",\n \"meltano.api.app:create_app\",\n ]\n\n # Start uvicorn using the MeltanoInvoker\n MeltanoInvoker(self.project).invoke(args, command=\"uvicorn\")\n\n else:\n # Use Gunicorn when feature flag start_uvicorn is not set\n\n args = [\n \"--config\",\n \"python:meltano.api.wsgi\",\n \"--pid\",\n str(self.pid_file),\n ]\n\n if self.reload:\n args += [\"--reload\"]\n\n args += [\"meltano.api.app:create_app()\"]\n\n MeltanoInvoker(self.project).invoke(args, command=\"gunicorn\")\n\n def pid_path(self):\n \"\"\"Give the path name of the projects gunicorn.pid file location.\n\n Returns:\n Path object that gives the direct locationo of the gunicorn.pid file.\n \"\"\"\n return self.project.run_dir(\"gunicorn.pid\")\n\n def stop(self):\n \"\"\"Terminnate active gunicorn workers that have placed a PID in the project's gunicorn.pid file.\"\"\"\n self.pid_file.process.terminate()\n", "path": "src/meltano/api/workers/api_worker.py"}]} | 1,894 | 140 |
gh_patches_debug_6871 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The worker should wait until all channels are ready.
If the worker has started training and the gRPC channels used to connect to the PS are not ready, a connection error will be raised.
```
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"@1574320322.398282300","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3876,"referenced_errors":[{"created":"@1574320322.398280426","description":"failed to connect to all addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":395,"grpc_status":14}]}"
```
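
A minimal sketch of the waiting behaviour being asked for, using gRPC's public Python API (the address and the commented-out stub method name are placeholders):

```python
import grpc

channel = grpc.insecure_channel("elasticdl-ps-0.default.svc:2222")

# Block until the channel reaches the READY state (raises on timeout).
grpc.channel_ready_future(channel).result(timeout=60)

# Alternatively, individual RPCs can queue until the channel is ready:
# stub.pull_variable(request, wait_for_ready=True)
```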
</issue>
<code>
[start of elasticdl/python/worker/main.py]
1 import time
2
3 import grpc
4 from kubernetes import client, config
5
6 from elasticdl.python.common import log_utils
7 from elasticdl.python.common.args import parse_worker_args
8 from elasticdl.python.common.constants import GRPC
9 from elasticdl.python.common.grpc_utils import build_channel
10 from elasticdl.python.worker.worker import Worker
11
12
13 def main():
14 args = parse_worker_args()
15 logger = log_utils.get_logger(__name__)
16 logger.info("Starting worker %d", args.worker_id)
17 if args.master_addr is None:
18 raise ValueError("master_addr is missing for worker")
19
20 master_channel = build_channel(args.master_addr)
21
22 ps_channels = []
23 if args.ps_addrs:
24 # TODO: use ps_addrs from master directly after ps service is working.
25 # Get ps pod ip for ps grpc connection for now.
26 ps_addrs = args.ps_addrs.split(",")
27
28 config.load_incluster_config()
29 api = client.CoreV1Api()
30
31 for addr in ps_addrs:
32 # addr is in the form as "ps-pod-name.namespace.svc:port"
33 addr_splitted = addr.split(".")
34 while True:
35 pod = api.read_namespaced_pod(
36 namespace=addr_splitted[1], name=addr_splitted[0]
37 )
38 if pod.status.pod_ip:
39 break
40 # If ps pod is not ready yet, sleep 2 seconds and try again.
41 time.sleep(2)
42 addr = pod.status.pod_ip + ":" + addr.split(":")[-1]
43 channel = grpc.insecure_channel(
44 addr,
45 options=[
46 (
47 "grpc.max_send_message_length",
48 GRPC.MAX_SEND_MESSAGE_LENGTH,
49 ),
50 (
51 "grpc.max_receive_message_length",
52 GRPC.MAX_RECEIVE_MESSAGE_LENGTH,
53 ),
54 ],
55 )
56 ps_channels.append(channel)
57
58 worker = Worker(args, channel=master_channel, ps_channels=ps_channels)
59 worker.run()
60
61
62 if __name__ == "__main__":
63 main()
64
[end of elasticdl/python/worker/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py
--- a/elasticdl/python/worker/main.py
+++ b/elasticdl/python/worker/main.py
@@ -53,6 +53,13 @@
),
],
)
+
+ # Wait the channel is ready by a Future object.
+ grpc.channel_ready_future(channel).result()
+ logger.info(
+ "grpc channel %s to connect pod %s is ready"
+ % (addr, pod.metadata.name)
+ )
ps_channels.append(channel)
worker = Worker(args, channel=master_channel, ps_channels=ps_channels)
| {"golden_diff": "diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py\n--- a/elasticdl/python/worker/main.py\n+++ b/elasticdl/python/worker/main.py\n@@ -53,6 +53,13 @@\n ),\n ],\n )\n+\n+ # Wait the channel is ready by a Future object.\n+ grpc.channel_ready_future(channel).result()\n+ logger.info(\n+ \"grpc channel %s to connect pod %s is ready\"\n+ % (addr, pod.metadata.name)\n+ )\n ps_channels.append(channel)\n \n worker = Worker(args, channel=master_channel, ps_channels=ps_channels)\n", "issue": "The worker should wait all channels are ready.\nIf the worker has started to train and the grpc channels to connect PS are not ready, a connection error will be raised.\r\n\r\n```\r\ngrpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:\r\n\tstatus = StatusCode.UNAVAILABLE\r\n\tdetails = \"failed to connect to all addresses\"\r\n\tdebug_error_string = \"{\"created\":\"@1574320322.398282300\",\"description\":\"Failed to pick subchannel\",\"file\":\"src/core/ext/filters/client_channel/client_channel.cc\",\"file_line\":3876,\"referenced_errors\":[{\"created\":\"@1574320322.398280426\",\"description\":\"failed to connect to all addresses\",\"file\":\"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc\",\"file_line\":395,\"grpc_status\":14}]}\"\r\n```\n", "before_files": [{"content": "import time\n\nimport grpc\nfrom kubernetes import client, config\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.constants import GRPC\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n # TODO: use ps_addrs from master directly after ps service is working.\n # Get ps pod ip for ps grpc connection for now.\n ps_addrs = args.ps_addrs.split(\",\")\n\n config.load_incluster_config()\n api = client.CoreV1Api()\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n addr_splitted = addr.split(\".\")\n while True:\n pod = api.read_namespaced_pod(\n namespace=addr_splitted[1], name=addr_splitted[0]\n )\n if pod.status.pod_ip:\n break\n # If ps pod is not ready yet, sleep 2 seconds and try again.\n time.sleep(2)\n addr = pod.status.pod_ip + \":\" + addr.split(\":\")[-1]\n channel = grpc.insecure_channel(\n addr,\n options=[\n (\n \"grpc.max_send_message_length\",\n GRPC.MAX_SEND_MESSAGE_LENGTH,\n ),\n (\n \"grpc.max_receive_message_length\",\n GRPC.MAX_RECEIVE_MESSAGE_LENGTH,\n ),\n ],\n )\n ps_channels.append(channel)\n\n worker = Worker(args, channel=master_channel, ps_channels=ps_channels)\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}]} | 1,291 | 145 |
gh_patches_debug_14345 | rasdani/github-patches | git_diff | buildbot__buildbot-5765 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix reference to tuplematch in base reporters
The tuplematch object was not referenced in the ReporterBase class when called from the GitHubStatusPush class.
It seems this is not an issue when it is executed from the tests but only occurs when called from GitHubStatusPush. I don't know how I can test this.
Fixes #5765
## Contributor Checklist:
* [ ] I have updated the unit tests
* [x] I have created a file in the `master/buildbot/newsfragments` directory (and read the `README.txt` in that directory)
* [x] I have updated the appropriate documentation
</issue>
<code>
[start of master/buildbot/reporters/base.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import abc
17
18 from twisted.internet import defer
19 from twisted.python import log
20
21 from buildbot import config
22 from buildbot import util
23 from buildbot.reporters import utils
24 from buildbot.util import service
25
26 ENCODING = 'utf-8'
27
28
29 class ReporterBase(service.BuildbotService):
30 name = None
31 __meta__ = abc.ABCMeta
32
33 compare_attrs = ['generators']
34
35 def __init__(self, *args, **kwargs):
36 super().__init__(*args, **kwargs)
37 self.generators = None
38 self._event_consumers = []
39
40 def checkConfig(self, generators):
41 if not isinstance(generators, list):
42 config.error('{}: generators argument must be a list')
43
44 for g in generators:
45 g.check()
46
47 if self.name is None:
48 self.name = self.__class__.__name__
49 for g in generators:
50 self.name += "_" + g.generate_name()
51
52 @defer.inlineCallbacks
53 def reconfigService(self, generators):
54
55 for consumer in self._event_consumers:
56 yield consumer.stopConsuming()
57 self._event_consumers = []
58
59 self.generators = generators
60
61 wanted_event_keys = set()
62 for g in self.generators:
63 wanted_event_keys.update(g.wanted_event_keys)
64
65 for key in sorted(list(wanted_event_keys)):
66 consumer = yield self.master.mq.startConsuming(self._got_event, key)
67 self._event_consumers.append(consumer)
68
69 @defer.inlineCallbacks
70 def stopService(self):
71 for consumer in self._event_consumers:
72 yield consumer.stopConsuming()
73 self._event_consumers = []
74 yield super().stopService()
75
76 def _does_generator_want_key(self, generator, key):
77 for filter in generator.wanted_event_keys:
78 if util.tuplematch.matchTuple(key, filter):
79 return True
80 return False
81
82 @defer.inlineCallbacks
83 def _got_event(self, key, msg):
84 try:
85 reports = []
86 for g in self.generators:
87 if self._does_generator_want_key(g, key):
88 report = yield g.generate(self.master, self, key, msg)
89 if report is not None:
90 reports.append(report)
91
92 if reports:
93 yield self.sendMessage(reports)
94 except Exception as e:
95 log.err(e, 'Got exception when handling reporter events')
96
97 def getResponsibleUsersForBuild(self, master, buildid):
98 # Use library method but subclassers may want to override that
99 return utils.getResponsibleUsersForBuild(master, buildid)
100
101 @abc.abstractmethod
102 def sendMessage(self, reports):
103 pass
104
[end of master/buildbot/reporters/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py
--- a/master/buildbot/reporters/base.py
+++ b/master/buildbot/reporters/base.py
@@ -19,9 +19,9 @@
from twisted.python import log
from buildbot import config
-from buildbot import util
from buildbot.reporters import utils
from buildbot.util import service
+from buildbot.util import tuplematch
ENCODING = 'utf-8'
@@ -75,7 +75,7 @@
def _does_generator_want_key(self, generator, key):
for filter in generator.wanted_event_keys:
- if util.tuplematch.matchTuple(key, filter):
+ if tuplematch.matchTuple(key, filter):
return True
return False
| {"golden_diff": "diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py\n--- a/master/buildbot/reporters/base.py\n+++ b/master/buildbot/reporters/base.py\n@@ -19,9 +19,9 @@\n from twisted.python import log\n \n from buildbot import config\n-from buildbot import util\n from buildbot.reporters import utils\n from buildbot.util import service\n+from buildbot.util import tuplematch\n \n ENCODING = 'utf-8'\n \n@@ -75,7 +75,7 @@\n \n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n- if util.tuplematch.matchTuple(key, filter):\n+ if tuplematch.matchTuple(key, filter):\n return True\n return False\n", "issue": "Fix reference to tuplematch in base reporters\nThe tuplematch object was not referenced in the ReporterBase class when referenced from the GitHubStatusPush class.\r\n\r\nIt seems this is not an issue when it is executed from the test but only occures when called from GitHubStatusPush. I don't know how I can test this.\r\n\r\nFixes #5765 \r\n\r\n\r\n## Contributor Checklist:\r\n\r\n* [ ] I have updated the unit tests\r\n* [x] I have created a file in the `master/buildbot/newsfragments` directory (and read the `README.txt` in that directory)\r\n* [x] I have updated the appropriate documentation\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import util\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = []\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n for key in sorted(list(wanted_event_keys)):\n consumer = yield self.master.mq.startConsuming(self._got_event, key)\n self._event_consumers.append(consumer)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n 
for filter in generator.wanted_event_keys:\n if util.tuplematch.matchTuple(key, filter):\n return True\n return False\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception when handling reporter events')\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}]} | 1,623 | 177 |
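The patch above fixes the broken `util.tuplematch` attribute lookup by importing `tuplematch` directly from `buildbot.util`. The sketch below shows how the corrected key filtering would behave; the routing keys are invented, and it assumes `matchTuple` treats `None` elements in a filter as wildcards, as Buildbot's MQ filters conventionally do.

```python
from buildbot.util import tuplematch

# Hypothetical wanted_event_keys of a report generator.
wanted_event_keys = [("builds", None, "finished")]


def does_generator_want_key(key):
    # Mirrors ReporterBase._does_generator_want_key after the fix: the module
    # is imported directly instead of being reached through `util.`.
    return any(tuplematch.matchTuple(key, flt) for flt in wanted_event_keys)


print(does_generator_want_key(("builds", 12, "finished")))     # expected: True
print(does_generator_want_key(("buildsets", 12, "complete")))  # expected: False
```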
gh_patches_debug_8036 | rasdani/github-patches | git_diff | saleor__saleor-2345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Featured products section should not be shown if there are none
If there are no featured products, the home page should not show the empty section.
### Screenshots

</issue>
<code>
[start of saleor/core/views.py]
1 import json
2
3 from django.contrib import messages
4 from django.template.response import TemplateResponse
5 from django.utils.translation import pgettext_lazy
6 from impersonate.views import impersonate as orig_impersonate
7
8 from ..account.models import User
9 from ..dashboard.views import staff_member_required
10 from ..product.utils import products_for_homepage
11 from ..product.utils.availability import products_with_availability
12 from ..seo.schema.webpage import get_webpage_schema
13
14
15 def home(request):
16 products = products_for_homepage()[:8]
17 products = products_with_availability(
18 products, discounts=request.discounts, taxes=request.taxes,
19 local_currency=request.currency)
20 webpage_schema = get_webpage_schema(request)
21 return TemplateResponse(
22 request, 'home.html', {
23 'parent': None,
24 'products': products,
25 'webpage_schema': json.dumps(webpage_schema)})
26
27
28 @staff_member_required
29 def styleguide(request):
30 return TemplateResponse(request, 'styleguide.html')
31
32
33 def impersonate(request, uid):
34 response = orig_impersonate(request, uid)
35 if request.session.modified:
36 msg = pgettext_lazy(
37 'Impersonation message',
38 'You are now logged as {}'.format(User.objects.get(pk=uid)))
39 messages.success(request, msg)
40 return response
41
42
43 def handle_404(request, exception=None):
44 return TemplateResponse(request, '404.html', status=404)
45
46
47 def manifest(request):
48 return TemplateResponse(
49 request, 'manifest.json', content_type='application/json')
50
[end of saleor/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/views.py b/saleor/core/views.py
--- a/saleor/core/views.py
+++ b/saleor/core/views.py
@@ -14,9 +14,9 @@
def home(request):
products = products_for_homepage()[:8]
- products = products_with_availability(
+ products = list(products_with_availability(
products, discounts=request.discounts, taxes=request.taxes,
- local_currency=request.currency)
+ local_currency=request.currency))
webpage_schema = get_webpage_schema(request)
return TemplateResponse(
request, 'home.html', {
| {"golden_diff": "diff --git a/saleor/core/views.py b/saleor/core/views.py\n--- a/saleor/core/views.py\n+++ b/saleor/core/views.py\n@@ -14,9 +14,9 @@\n \n def home(request):\n products = products_for_homepage()[:8]\n- products = products_with_availability(\n+ products = list(products_with_availability(\n products, discounts=request.discounts, taxes=request.taxes,\n- local_currency=request.currency)\n+ local_currency=request.currency))\n webpage_schema = get_webpage_schema(request)\n return TemplateResponse(\n request, 'home.html', {\n", "issue": "Featured products section should not be shown if there is none\nIf there is no featured products, the home page should not show the empty section.\r\n\r\n### Screenshots\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom django.contrib import messages\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import pgettext_lazy\nfrom impersonate.views import impersonate as orig_impersonate\n\nfrom ..account.models import User\nfrom ..dashboard.views import staff_member_required\nfrom ..product.utils import products_for_homepage\nfrom ..product.utils.availability import products_with_availability\nfrom ..seo.schema.webpage import get_webpage_schema\n\n\ndef home(request):\n products = products_for_homepage()[:8]\n products = products_with_availability(\n products, discounts=request.discounts, taxes=request.taxes,\n local_currency=request.currency)\n webpage_schema = get_webpage_schema(request)\n return TemplateResponse(\n request, 'home.html', {\n 'parent': None,\n 'products': products,\n 'webpage_schema': json.dumps(webpage_schema)})\n\n\n@staff_member_required\ndef styleguide(request):\n return TemplateResponse(request, 'styleguide.html')\n\n\ndef impersonate(request, uid):\n response = orig_impersonate(request, uid)\n if request.session.modified:\n msg = pgettext_lazy(\n 'Impersonation message',\n 'You are now logged as {}'.format(User.objects.get(pk=uid)))\n messages.success(request, msg)\n return response\n\n\ndef handle_404(request, exception=None):\n return TemplateResponse(request, '404.html', status=404)\n\n\ndef manifest(request):\n return TemplateResponse(\n request, 'manifest.json', content_type='application/json')\n", "path": "saleor/core/views.py"}]} | 1,005 | 134 |
gh_patches_debug_1146 | rasdani/github-patches | git_diff | locustio__locust-1760 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Locust stopped working after Flast 2.0 got released
In `setup.py` I can see:
` "flask>=1.1.2", `
I guess it should be hardcoded to ==1.1.2 for now.
it crashes with:
```
File "/root/.local/share/virtualenvs/xxxxxxx/lib/python3.6/site-packages/locust/web.py", line 102, in __init__
app.jinja_options["extensions"].append("jinja2.ext.do")
KeyError: 'extensions'
```
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5 import sys
6
7 from setuptools import find_packages, setup
8
9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
10
11 # parse version from locust/__init__.py
12 _version_re = re.compile(r"__version__\s+=\s+(.*)")
13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py")
14 with open(_init_file, "rb") as f:
15 version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)))
16
17 setup(
18 name="locust",
19 version=version,
20 install_requires=[
21 "gevent>=20.9.0",
22 "flask>=1.1.2",
23 "Werkzeug>=1.0.1",
24 "requests>=2.9.1",
25 "msgpack>=0.6.2",
26 "pyzmq>=16.0.2",
27 "geventhttpclient>=1.4.4",
28 "ConfigArgParse>=1.0",
29 "psutil>=5.6.7",
30 "Flask-BasicAuth>=0.2.0",
31 ],
32 test_suite="locust.test",
33 tests_require=[
34 "cryptography",
35 "mock",
36 "pyquery",
37 ],
38 extras_require={
39 ":sys_platform == 'win32'": ["pywin32"],
40 },
41 )
42
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
version=version,
install_requires=[
"gevent>=20.9.0",
- "flask>=1.1.2",
+ "flask==1.1.2",
"Werkzeug>=1.0.1",
"requests>=2.9.1",
"msgpack>=0.6.2",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n- \"flask>=1.1.2\",\n+ \"flask==1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n", "issue": "Locust stopped working after Flast 2.0 got released\nin setup.py I can see:\r\n` \"flask>=1.1.2\", `\r\nI guess it should be hardcoded to ==1.1.2 for now.\r\n\r\nit crashes with:\r\n```\r\nFile \"/root/.local/share/virtualenvs/xxxxxxx/lib/python3.6/site-packages/locust/web.py\", line 102, in __init__\r\napp.jinja_options[\"extensions\"].append(\"jinja2.ext.do\")\r\nKeyError: 'extensions'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}]} | 1,037 | 111 |
gh_patches_debug_33387 | rasdani/github-patches | git_diff | scrapy__scrapy-791 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_nons function is not used in scrapy.contrib.linkextractors.lxmlhtml
There is either a bug or some stray code in https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/linkextractors/lxmlhtml.py#L37: the local variable assigned by `tag = _nons(el.tag)` is never used, and so the `_nons` function is also unused. @redapple - what was the intended behavior?
</issue>
<code>
[start of scrapy/contrib/linkextractors/lxmlhtml.py]
1 """
2 Link extractor based on lxml.html
3 """
4
5 import re
6 from urlparse import urlparse, urljoin
7
8 import lxml.etree as etree
9
10 from scrapy.selector import Selector
11 from scrapy.link import Link
12 from scrapy.utils.misc import arg_to_iter
13 from scrapy.utils.python import unique as unique_list, str_to_unicode
14 from scrapy.linkextractor import FilteringLinkExtractor
15 from scrapy.utils.response import get_base_url
16
17
18 # from lxml/src/lxml/html/__init__.py
19 XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
20
21 _collect_string_content = etree.XPath("string()")
22
23 def _nons(tag):
24 if isinstance(tag, basestring):
25 if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
26 return tag.split('}')[-1]
27 return tag
28
29
30 class LxmlParserLinkExtractor(object):
31 def __init__(self, tag="a", attr="href", process=None, unique=False):
32 self.scan_tag = tag if callable(tag) else lambda t: t == tag
33 self.scan_attr = attr if callable(attr) else lambda a: a == attr
34 self.process_attr = process if callable(process) else lambda v: v
35 self.unique = unique
36
37 def _iter_links(self, document):
38 for el in document.iter(etree.Element):
39 tag = _nons(el.tag)
40 if not self.scan_tag(el.tag):
41 continue
42 attribs = el.attrib
43 for attrib in attribs:
44 yield (el, attrib, attribs[attrib])
45
46 def _extract_links(self, selector, response_url, response_encoding, base_url):
47 links = []
48 # hacky way to get the underlying lxml parsed document
49 for el, attr, attr_val in self._iter_links(selector._root):
50 if self.scan_tag(el.tag) and self.scan_attr(attr):
51 # pseudo _root.make_links_absolute(base_url)
52 attr_val = urljoin(base_url, attr_val)
53 url = self.process_attr(attr_val)
54 if url is None:
55 continue
56 if isinstance(url, unicode):
57 url = url.encode(response_encoding)
58 # to fix relative links after process_value
59 url = urljoin(response_url, url)
60 link = Link(url, _collect_string_content(el) or u'',
61 nofollow=True if el.get('rel') == 'nofollow' else False)
62 links.append(link)
63
64 return unique_list(links, key=lambda link: link.url) \
65 if self.unique else links
66
67 def extract_links(self, response):
68 html = Selector(response)
69 base_url = get_base_url(response)
70 return self._extract_links(html, response.url, response.encoding, base_url)
71
72 def _process_links(self, links):
73 """ Normalize and filter extracted links
74
75 The subclass should override it if neccessary
76 """
77 links = unique_list(links, key=lambda link: link.url) if self.unique else links
78 return links
79
80
81 class LxmlLinkExtractor(FilteringLinkExtractor):
82
83 def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
84 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,
85 deny_extensions=None):
86 tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
87 tag_func = lambda x: x in tags
88 attr_func = lambda x: x in attrs
89 lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,
90 unique=unique, process=process_value)
91
92 super(LxmlLinkExtractor, self).__init__(lx, allow, deny,
93 allow_domains, deny_domains, restrict_xpaths, canonicalize,
94 deny_extensions)
95
96 def extract_links(self, response):
97 html = Selector(response)
98 base_url = get_base_url(response)
99 if self.restrict_xpaths:
100 docs = [subdoc
101 for x in self.restrict_xpaths
102 for subdoc in html.xpath(x)]
103 else:
104 docs = [html]
105 all_links = []
106 for doc in docs:
107 links = self._extract_links(doc, response.url, response.encoding, base_url)
108 all_links.extend(self._process_links(links))
109 return unique_list(all_links)
110
111
[end of scrapy/contrib/linkextractors/lxmlhtml.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/contrib/linkextractors/lxmlhtml.py b/scrapy/contrib/linkextractors/lxmlhtml.py
--- a/scrapy/contrib/linkextractors/lxmlhtml.py
+++ b/scrapy/contrib/linkextractors/lxmlhtml.py
@@ -36,30 +36,30 @@
def _iter_links(self, document):
for el in document.iter(etree.Element):
- tag = _nons(el.tag)
- if not self.scan_tag(el.tag):
+ if not self.scan_tag(_nons(el.tag)):
continue
attribs = el.attrib
for attrib in attribs:
+ if not self.scan_attr(attrib):
+ continue
yield (el, attrib, attribs[attrib])
def _extract_links(self, selector, response_url, response_encoding, base_url):
links = []
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector._root):
- if self.scan_tag(el.tag) and self.scan_attr(attr):
- # pseudo _root.make_links_absolute(base_url)
- attr_val = urljoin(base_url, attr_val)
- url = self.process_attr(attr_val)
- if url is None:
- continue
- if isinstance(url, unicode):
- url = url.encode(response_encoding)
- # to fix relative links after process_value
- url = urljoin(response_url, url)
- link = Link(url, _collect_string_content(el) or u'',
- nofollow=True if el.get('rel') == 'nofollow' else False)
- links.append(link)
+ # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
+ attr_val = urljoin(base_url, attr_val)
+ url = self.process_attr(attr_val)
+ if url is None:
+ continue
+ if isinstance(url, unicode):
+ url = url.encode(response_encoding)
+ # to fix relative links after process_value
+ url = urljoin(response_url, url)
+ link = Link(url, _collect_string_content(el) or u'',
+ nofollow=True if el.get('rel') == 'nofollow' else False)
+ links.append(link)
return unique_list(links, key=lambda link: link.url) \
if self.unique else links
| {"golden_diff": "diff --git a/scrapy/contrib/linkextractors/lxmlhtml.py b/scrapy/contrib/linkextractors/lxmlhtml.py\n--- a/scrapy/contrib/linkextractors/lxmlhtml.py\n+++ b/scrapy/contrib/linkextractors/lxmlhtml.py\n@@ -36,30 +36,30 @@\n \n def _iter_links(self, document):\n for el in document.iter(etree.Element):\n- tag = _nons(el.tag)\n- if not self.scan_tag(el.tag):\n+ if not self.scan_tag(_nons(el.tag)):\n continue\n attribs = el.attrib\n for attrib in attribs:\n+ if not self.scan_attr(attrib):\n+ continue\n yield (el, attrib, attribs[attrib])\n \n def _extract_links(self, selector, response_url, response_encoding, base_url):\n links = []\n # hacky way to get the underlying lxml parsed document\n for el, attr, attr_val in self._iter_links(selector._root):\n- if self.scan_tag(el.tag) and self.scan_attr(attr):\n- # pseudo _root.make_links_absolute(base_url)\n- attr_val = urljoin(base_url, attr_val)\n- url = self.process_attr(attr_val)\n- if url is None:\n- continue\n- if isinstance(url, unicode):\n- url = url.encode(response_encoding)\n- # to fix relative links after process_value\n- url = urljoin(response_url, url)\n- link = Link(url, _collect_string_content(el) or u'',\n- nofollow=True if el.get('rel') == 'nofollow' else False)\n- links.append(link)\n+ # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)\n+ attr_val = urljoin(base_url, attr_val)\n+ url = self.process_attr(attr_val)\n+ if url is None:\n+ continue\n+ if isinstance(url, unicode):\n+ url = url.encode(response_encoding)\n+ # to fix relative links after process_value\n+ url = urljoin(response_url, url)\n+ link = Link(url, _collect_string_content(el) or u'',\n+ nofollow=True if el.get('rel') == 'nofollow' else False)\n+ links.append(link)\n \n return unique_list(links, key=lambda link: link.url) \\\n if self.unique else links\n", "issue": "_nons function is not used in scrapy.contrib.linkextractors.lxmlhtml\nThere is either a bug or some stray code in https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/linkextractors/lxmlhtml.py#L37: `tag = _nons(el.tag)` local variable is not used, and so `_nons` function is also unused. 
@redapple - what was the intended behavior?\n\n", "before_files": [{"content": "\"\"\"\nLink extractor based on lxml.html\n\"\"\"\n\nimport re\nfrom urlparse import urlparse, urljoin\n\nimport lxml.etree as etree\n\nfrom scrapy.selector import Selector\nfrom scrapy.link import Link\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import unique as unique_list, str_to_unicode\nfrom scrapy.linkextractor import FilteringLinkExtractor\nfrom scrapy.utils.response import get_base_url\n\n\n# from lxml/src/lxml/html/__init__.py\nXHTML_NAMESPACE = \"http://www.w3.org/1999/xhtml\"\n\n_collect_string_content = etree.XPath(\"string()\")\n\ndef _nons(tag):\n if isinstance(tag, basestring):\n if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:\n return tag.split('}')[-1]\n return tag\n\n\nclass LxmlParserLinkExtractor(object):\n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\n self.unique = unique\n\n def _iter_links(self, document):\n for el in document.iter(etree.Element):\n tag = _nons(el.tag)\n if not self.scan_tag(el.tag):\n continue\n attribs = el.attrib\n for attrib in attribs:\n yield (el, attrib, attribs[attrib])\n\n def _extract_links(self, selector, response_url, response_encoding, base_url):\n links = []\n # hacky way to get the underlying lxml parsed document\n for el, attr, attr_val in self._iter_links(selector._root):\n if self.scan_tag(el.tag) and self.scan_attr(attr):\n # pseudo _root.make_links_absolute(base_url)\n attr_val = urljoin(base_url, attr_val)\n url = self.process_attr(attr_val)\n if url is None:\n continue\n if isinstance(url, unicode):\n url = url.encode(response_encoding)\n # to fix relative links after process_value\n url = urljoin(response_url, url)\n link = Link(url, _collect_string_content(el) or u'',\n nofollow=True if el.get('rel') == 'nofollow' else False)\n links.append(link)\n\n return unique_list(links, key=lambda link: link.url) \\\n if self.unique else links\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n return self._extract_links(html, response.url, response.encoding, base_url)\n\n def _process_links(self, links):\n \"\"\" Normalize and filter extracted links\n\n The subclass should override it if neccessary\n \"\"\"\n links = unique_list(links, key=lambda link: link.url) if self.unique else links\n return links\n\n\nclass LxmlLinkExtractor(FilteringLinkExtractor):\n\n def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),\n tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,\n deny_extensions=None):\n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n tag_func = lambda x: x in tags\n attr_func = lambda x: x in attrs\n lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,\n unique=unique, process=process_value)\n\n super(LxmlLinkExtractor, self).__init__(lx, allow, deny,\n allow_domains, deny_domains, restrict_xpaths, canonicalize,\n deny_extensions)\n\n def extract_links(self, response):\n html = Selector(response)\n base_url = get_base_url(response)\n if self.restrict_xpaths:\n docs = [subdoc\n for x in self.restrict_xpaths\n for subdoc in html.xpath(x)]\n else:\n docs = [html]\n all_links = []\n for doc in docs:\n links = self._extract_links(doc, 
response.url, response.encoding, base_url)\n all_links.extend(self._process_links(links))\n return unique_list(all_links)\n\n", "path": "scrapy/contrib/linkextractors/lxmlhtml.py"}]} | 1,778 | 511 |
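After the patch above, `_iter_links` filters on the namespace-stripped tag (so `_nons` is finally used) and on the attribute name before yielding anything, and the duplicate checks are removed from `_extract_links`. A simplified, lxml-based sketch of that filtering generator is shown below; the sample HTML and the tag/attribute predicates are illustrative.

```python
import lxml.html

XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"


def _nons(tag):
    # Strip the XHTML namespace prefix, mirroring the helper from the issue.
    if isinstance(tag, str) and tag.startswith("{" + XHTML_NAMESPACE + "}"):
        return tag.split("}")[-1]
    return tag


def iter_links(document, scan_tag, scan_attr):
    # Post-fix behaviour: check the *namespace-stripped* tag and the
    # attribute name before yielding (element, attribute, value) triples.
    for el in document.iter():
        if not scan_tag(_nons(el.tag)):
            continue
        for attrib, value in el.attrib.items():
            if not scan_attr(attrib):
                continue
            yield el, attrib, value


doc = lxml.html.fromstring('<a href="/next" rel="nofollow">next</a><img src="x.png">')
for el, attr, value in iter_links(doc, lambda t: t == "a", lambda a: a == "href"):
    print(el.tag, attr, value)   # expected output: a href /next
```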
gh_patches_debug_4157 | rasdani/github-patches | git_diff | GPflow__GPflow-648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reproducibility using seeds
I've been running some experiments, and it seems I cannot get reproducible results even when setting the numpy and tensorflow seeds at the top of the script, like
```
np.random.seed(72)
tf.set_random_seed(72)
```
Naturally, either I'm setting them wrong or I'm missing an RNG. I call Param.randomize() and Model.optimize(), and they seem to be the only sources of randomness at the moment (and I'm not even sure about the latter), so one of them is likely the culprit. Is there any way either to set the seed appropriately without messing with the source code, or maybe to add an option allowing seeds to be set in the future?
</issue>
<code>
[start of gpflow/training/optimizer.py]
1 # Copyright 2017 Artem Artemev @awav
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=no-self-use
16 # pylint: disable=too-few-public-methods
17
18 import abc
19
20
21 class Optimizer:
22 @abc.abstractmethod
23 def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):
24 """
25 Make optimization tensor.
26 The `make_optimize_tensor` method builds optimization tensor and initializes
27 all necessary variables created by optimizer.
28
29 :param model: GPflow model.
30 :param session: Tensorflow session.
31 :param var_list: List of variables for training.
32 :param kwargs: Dictionary of extra parameters necessary for building
33 optimizer tensor.
34 :return: Tensorflow optimization tensor or operation.
35 """
36 pass
37
38 @abc.abstractmethod
39 def minimize(self, model, session=None, var_list=None, feed_dict=None,
40 maxiter=1000, initialize=True, anchor=True, **kwargs):
41 raise NotImplementedError()
42
43 @staticmethod
44 def _gen_var_list(model, var_list):
45 var_list = var_list or []
46 return list(set(model.trainable_tensors).union(var_list))
47
48 @staticmethod
49 def _gen_feed_dict(model, feed_dict):
50 feed_dict = feed_dict or {}
51 model_feeds = {} if model.feeds is None else model.feeds
52 feed_dict.update(model_feeds)
53 if feed_dict == {}:
54 return None
55 return feed_dict
56
[end of gpflow/training/optimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpflow/training/optimizer.py b/gpflow/training/optimizer.py
--- a/gpflow/training/optimizer.py
+++ b/gpflow/training/optimizer.py
@@ -43,7 +43,8 @@
@staticmethod
def _gen_var_list(model, var_list):
var_list = var_list or []
- return list(set(model.trainable_tensors).union(var_list))
+ all_vars = list(set(model.trainable_tensors).union(var_list))
+ return sorted(all_vars, key=lambda x: x.name)
@staticmethod
def _gen_feed_dict(model, feed_dict):
| {"golden_diff": "diff --git a/gpflow/training/optimizer.py b/gpflow/training/optimizer.py\n--- a/gpflow/training/optimizer.py\n+++ b/gpflow/training/optimizer.py\n@@ -43,7 +43,8 @@\n @staticmethod\n def _gen_var_list(model, var_list):\n var_list = var_list or []\n- return list(set(model.trainable_tensors).union(var_list))\n+ all_vars = list(set(model.trainable_tensors).union(var_list))\n+ return sorted(all_vars, key=lambda x: x.name)\n \n @staticmethod\n def _gen_feed_dict(model, feed_dict):\n", "issue": "Reproducibility using seeds\nBeen running some experiments, and it seems I cannot get the same reproducible result even when setting numpy and tensorflow seeds at the top of the script like\r\n\r\n```\r\nnp.random.seed(72)\r\ntf.set_random_seed(72)\r\n```\r\n\r\nNaturally, either I'm setting them wrong or I'm missing a RNG. I call Param.randomize() and Model.optimize(), and they seem to be the only sources of randomness at the moment (and I'm not even sure about the latter) so one of them is likely the culprit. Is there any way to either set the seed appropriately without messing with the source code or maybe adding an option allowing seeds to be set in the future?\n", "before_files": [{"content": "# Copyright 2017 Artem Artemev @awav\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=no-self-use\n# pylint: disable=too-few-public-methods\n\nimport abc\n\n\nclass Optimizer:\n @abc.abstractmethod\n def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):\n \"\"\"\n Make optimization tensor.\n The `make_optimize_tensor` method builds optimization tensor and initializes\n all necessary variables created by optimizer.\n\n :param model: GPflow model.\n :param session: Tensorflow session.\n :param var_list: List of variables for training.\n :param kwargs: Dictionary of extra parameters necessary for building\n optimizer tensor.\n :return: Tensorflow optimization tensor or operation.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def minimize(self, model, session=None, var_list=None, feed_dict=None,\n maxiter=1000, initialize=True, anchor=True, **kwargs):\n raise NotImplementedError()\n\n @staticmethod\n def _gen_var_list(model, var_list):\n var_list = var_list or []\n return list(set(model.trainable_tensors).union(var_list))\n\n @staticmethod\n def _gen_feed_dict(model, feed_dict):\n feed_dict = feed_dict or {}\n model_feeds = {} if model.feeds is None else model.feeds\n feed_dict.update(model_feeds)\n if feed_dict == {}:\n return None\n return feed_dict\n", "path": "gpflow/training/optimizer.py"}]} | 1,222 | 140 |
gh_patches_debug_3024 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename modifier missing from update
Not sure if this is intentional or not, but it would be useful to have the `$rename` operator (or "modifier") available for the update method of QuerySet and Document.
I'm currently working around it with `exec_js`, like so:
``` python
Document.objects.exec_js("""
function() {
db[collection].update({}, {$rename: {foo: 'bar'}});
}""")
```
</issue>
<code>
[start of mongoengine/base/common.py]
1 from mongoengine.errors import NotRegistered
2
3 __all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')
4
5
6 UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',
7 'push_all', 'pull', 'pull_all', 'add_to_set',
8 'set_on_insert', 'min', 'max'])
9
10
11 _document_registry = {}
12
13
14 def get_document(name):
15 """Get a document class by name."""
16 doc = _document_registry.get(name, None)
17 if not doc:
18 # Possible old style name
19 single_end = name.split('.')[-1]
20 compound_end = '.%s' % single_end
21 possible_match = [k for k in _document_registry.keys()
22 if k.endswith(compound_end) or k == single_end]
23 if len(possible_match) == 1:
24 doc = _document_registry.get(possible_match.pop(), None)
25 if not doc:
26 raise NotRegistered("""
27 `%s` has not been registered in the document registry.
28 Importing the document class automatically registers it, has it
29 been imported?
30 """.strip() % name)
31 return doc
32
[end of mongoengine/base/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/base/common.py b/mongoengine/base/common.py
--- a/mongoengine/base/common.py
+++ b/mongoengine/base/common.py
@@ -5,7 +5,7 @@
UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',
'push_all', 'pull', 'pull_all', 'add_to_set',
- 'set_on_insert', 'min', 'max'])
+ 'set_on_insert', 'min', 'max', 'rename'])
_document_registry = {}
| {"golden_diff": "diff --git a/mongoengine/base/common.py b/mongoengine/base/common.py\n--- a/mongoengine/base/common.py\n+++ b/mongoengine/base/common.py\n@@ -5,7 +5,7 @@\n \n UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',\n 'push_all', 'pull', 'pull_all', 'add_to_set',\n- 'set_on_insert', 'min', 'max'])\n+ 'set_on_insert', 'min', 'max', 'rename'])\n \n \n _document_registry = {}\n", "issue": "Rename modifier missing from update\nNot sure if this is intentional or not but it would be useful to have the `$rename` operator (or \"modifier\" for the update method for QuerySet and Document) available.\n\nI'm currently working around it with `exec_js`, like so:\n\n``` python\nDocument.objects.exec_js(\"\"\"\nfunction() {\n db[collection].update({}, {$rename: {foo: 'bar'}});\n}\"\"\")\n```\n\n", "before_files": [{"content": "from mongoengine.errors import NotRegistered\n\n__all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')\n\n\nUPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'pop', 'push',\n 'push_all', 'pull', 'pull_all', 'add_to_set',\n 'set_on_insert', 'min', 'max'])\n\n\n_document_registry = {}\n\n\ndef get_document(name):\n \"\"\"Get a document class by name.\"\"\"\n doc = _document_registry.get(name, None)\n if not doc:\n # Possible old style name\n single_end = name.split('.')[-1]\n compound_end = '.%s' % single_end\n possible_match = [k for k in _document_registry.keys()\n if k.endswith(compound_end) or k == single_end]\n if len(possible_match) == 1:\n doc = _document_registry.get(possible_match.pop(), None)\n if not doc:\n raise NotRegistered(\"\"\"\n `%s` has not been registered in the document registry.\n Importing the document class automatically registers it, has it\n been imported?\n \"\"\".strip() % name)\n return doc\n", "path": "mongoengine/base/common.py"}]} | 932 | 123 |
gh_patches_debug_11130 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1423 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Breaking change in sync httpx refactor from #1403
elastic/apm-agent-python#1403 refactored a lot of the httpx code. However, it seems that the sync version of the instrumentation still calls `self._set_disttracing_headers`, which was removed in the same PR, causing any form of sync httpx instrumentation to crash.
**Environment (please complete the following information)**
- Agent version: 6.7.1
</issue>
<code>
[start of elasticapm/instrumentation/packages/httpx/sync/httpcore.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2021, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
32 from elasticapm.instrumentation.packages.httpx import utils
33 from elasticapm.traces import DroppedSpan, capture_span, execution_context
34 from elasticapm.utils import default_ports
35 from elasticapm.utils.disttracing import TracingOptions
36
37
38 class HTTPCoreInstrumentation(AbstractInstrumentedModule):
39 name = "httpcore"
40
41 instrument_list = [
42 ("httpcore._sync.connection", "SyncHTTPConnection.request"), # < httpcore 0.13
43 ("httpcore._sync.connection", "SyncHTTPConnection.handle_request"), # >= httpcore 0.13
44 ("httpcore._sync.connection", "HTTPConnection.handle_request"), # httpcore >= 0.14 (hopefully...)
45 ]
46
47 def call(self, module, method, wrapped, instance, args, kwargs):
48 url, method, headers = utils.get_request_data(args, kwargs)
49 scheme, host, port, target = url
50 if port != default_ports.get(scheme):
51 host += ":" + str(port)
52
53 signature = "%s %s" % (method.upper(), host)
54
55 url = "%s://%s%s" % (scheme, host, target)
56
57 transaction = execution_context.get_transaction()
58
59 with capture_span(
60 signature,
61 span_type="external",
62 span_subtype="http",
63 extra={"http": {"url": url}},
64 leaf=True,
65 ) as span:
66 # if httpcore has been called in a leaf span, this span might be a DroppedSpan.
67 leaf_span = span
68 while isinstance(leaf_span, DroppedSpan):
69 leaf_span = leaf_span.parent
70
71 if headers is not None:
72 # It's possible that there are only dropped spans, e.g. if we started dropping spans.
73 # In this case, the transaction.id is used
74 parent_id = leaf_span.id if leaf_span else transaction.id
75 trace_parent = transaction.trace_parent.copy_from(
76 span_id=parent_id, trace_options=TracingOptions(recorded=True)
77 )
78 utils.set_disttracing_headers(headers, trace_parent, transaction)
79 if leaf_span:
80 leaf_span.dist_tracing_propagated = True
81 response = wrapped(*args, **kwargs)
82 status_code = utils.get_status(response)
83 if status_code:
84 if span.context:
85 span.context["http"]["status_code"] = status_code
86 span.set_success() if status_code < 400 else span.set_failure()
87 return response
88
89 def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
90 # since we don't have a span, we set the span id to the transaction id
91 trace_parent = transaction.trace_parent.copy_from(
92 span_id=transaction.id, trace_options=TracingOptions(recorded=False)
93 )
94 if "headers" in kwargs:
95 headers = kwargs["headers"]
96 if headers is None:
97 headers = []
98 kwargs["headers"] = headers
99 self._set_disttracing_headers(headers, trace_parent, transaction)
100 return args, kwargs
101
[end of elasticapm/instrumentation/packages/httpx/sync/httpcore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py
--- a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py
+++ b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py
@@ -91,10 +91,6 @@
trace_parent = transaction.trace_parent.copy_from(
span_id=transaction.id, trace_options=TracingOptions(recorded=False)
)
- if "headers" in kwargs:
- headers = kwargs["headers"]
- if headers is None:
- headers = []
- kwargs["headers"] = headers
- self._set_disttracing_headers(headers, trace_parent, transaction)
+ headers = utils.get_request_data(args, kwargs)[2]
+ utils.set_disttracing_headers(headers, trace_parent, transaction)
return args, kwargs
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n--- a/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n+++ b/elasticapm/instrumentation/packages/httpx/sync/httpcore.py\n@@ -91,10 +91,6 @@\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n- if \"headers\" in kwargs:\n- headers = kwargs[\"headers\"]\n- if headers is None:\n- headers = []\n- kwargs[\"headers\"] = headers\n- self._set_disttracing_headers(headers, trace_parent, transaction)\n+ headers = utils.get_request_data(args, kwargs)[2]\n+ utils.set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n", "issue": "Breaking change in sync httpx refactor from #1403\nelastic/apm-agent-python#1403 refactored a lot of the httpx code. However it seems that the sync version of the instrumentation still calls `self._set_disttracing_headers` which was removed in the same PR, causing any form of sync httpx instrumentation to crash.\r\n\r\n**Environment (please complete the following information)**\r\n- Agent version: 6.7.1\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2021, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.httpx import utils\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils import default_ports\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\nclass HTTPCoreInstrumentation(AbstractInstrumentedModule):\n name = \"httpcore\"\n\n instrument_list = [\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.request\"), # < httpcore 0.13\n (\"httpcore._sync.connection\", \"SyncHTTPConnection.handle_request\"), # >= httpcore 0.13\n (\"httpcore._sync.connection\", \"HTTPConnection.handle_request\"), # httpcore >= 0.14 (hopefully...)\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n url, method, headers = utils.get_request_data(args, kwargs)\n scheme, host, port, target = url\n if port != default_ports.get(scheme):\n host += \":\" + str(port)\n\n signature = \"%s %s\" % (method.upper(), host)\n\n url = \"%s://%s%s\" % (scheme, host, target)\n\n transaction = execution_context.get_transaction()\n\n with capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n # if httpcore has been called in a leaf span, this span might be a DroppedSpan.\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n if headers is not None:\n # It's possible that there are only dropped spans, e.g. if we started dropping spans.\n # In this case, the transaction.id is used\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n utils.set_disttracing_headers(headers, trace_parent, transaction)\n if leaf_span:\n leaf_span.dist_tracing_propagated = True\n response = wrapped(*args, **kwargs)\n status_code = utils.get_status(response)\n if status_code:\n if span.context:\n span.context[\"http\"][\"status_code\"] = status_code\n span.set_success() if status_code < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n if \"headers\" in kwargs:\n headers = kwargs[\"headers\"]\n if headers is None:\n headers = []\n kwargs[\"headers\"] = headers\n self._set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n", "path": "elasticapm/instrumentation/packages/httpx/sync/httpcore.py"}]} | 1,855 | 204 |
gh_patches_debug_28407 | rasdani/github-patches | git_diff | pantsbuild__pants-16295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not able to load resources when using pants vs sbt
**Describe the bug**
When using sbt we are able to call `Thread.currentThread().getContextClassLoader().getResources` and get a list of URLs. When using pants the list is empty.
This at the moment limits us from using Flyway with pants.
**Pants version**
2.13.0a1 and main.
**OS**
MacOS
**Additional info**
Example repo to reproduce the issue:
https://github.com/somdoron/test-pants-resources
I think the issue is, that pants only compress files in the resources zip file and not the directories.
</issue>
<code>
[start of src/python/pants/jvm/resources.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 import itertools
4 import logging
5 from itertools import chain
6
7 from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet
8 from pants.core.util_rules import stripped_source_files
9 from pants.core.util_rules.source_files import SourceFilesRequest
10 from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
11 from pants.core.util_rules.system_binaries import ZipBinary
12 from pants.engine.fs import Digest, MergeDigests
13 from pants.engine.internals.selectors import MultiGet
14 from pants.engine.process import Process, ProcessResult
15 from pants.engine.rules import Get, collect_rules, rule
16 from pants.engine.target import SourcesField
17 from pants.engine.unions import UnionRule
18 from pants.jvm import compile
19 from pants.jvm.compile import (
20 ClasspathDependenciesRequest,
21 ClasspathEntry,
22 ClasspathEntryRequest,
23 ClasspathEntryRequests,
24 CompileResult,
25 FallibleClasspathEntries,
26 FallibleClasspathEntry,
27 )
28
29 logger = logging.getLogger(__name__)
30
31
32 class JvmResourcesRequest(ClasspathEntryRequest):
33 field_sets = (
34 ResourcesFieldSet,
35 ResourcesGeneratorFieldSet,
36 )
37
38
39 @rule(desc="Assemble resources")
40 async def assemble_resources_jar(
41 zip: ZipBinary,
42 request: JvmResourcesRequest,
43 ) -> FallibleClasspathEntry:
44 # Request the component's direct dependency classpath, and additionally any prerequisite.
45 # Filter out any dependencies that are generated by our current target so that each resource
46 # only appears in a single input JAR.
47 # NOTE: Generated dependencies will have the same dependencies as the current target, so we
48 # don't need to inspect those dependencies.
49 optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())]
50 fallibles = await MultiGet(
51 Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)),
52 Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)),
53 )
54 direct_dependency_classpath_entries = FallibleClasspathEntries(
55 itertools.chain(*fallibles)
56 ).if_all_succeeded()
57
58 if direct_dependency_classpath_entries is None:
59 return FallibleClasspathEntry(
60 description=str(request.component),
61 result=CompileResult.DEPENDENCY_FAILED,
62 output=None,
63 exit_code=1,
64 )
65
66 source_files = await Get(
67 StrippedSourceFiles,
68 SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]),
69 )
70
71 output_filename = f"{request.component.representative.address.path_safe_spec}.resources.jar"
72 output_files = [output_filename]
73
74 resources_jar_input_digest = source_files.snapshot.digest
75 resources_jar_result = await Get(
76 ProcessResult,
77 Process(
78 argv=[
79 zip.path,
80 output_filename,
81 *source_files.snapshot.files,
82 ],
83 description="Build partial JAR containing resources files",
84 input_digest=resources_jar_input_digest,
85 output_files=output_files,
86 ),
87 )
88
89 cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, [])
90
91 merged_cpe_digest = await Get(
92 Digest,
93 MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))),
94 )
95
96 merged_cpe = ClasspathEntry.merge(
97 digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries]
98 )
99
100 return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0)
101
102
103 def rules():
104 return [
105 *collect_rules(),
106 *compile.rules(),
107 *stripped_source_files.rules(),
108 UnionRule(ClasspathEntryRequest, JvmResourcesRequest),
109 ]
110
[end of src/python/pants/jvm/resources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/jvm/resources.py b/src/python/pants/jvm/resources.py
--- a/src/python/pants/jvm/resources.py
+++ b/src/python/pants/jvm/resources.py
@@ -3,6 +3,7 @@
import itertools
import logging
from itertools import chain
+from pathlib import Path
from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet
from pants.core.util_rules import stripped_source_files
@@ -71,6 +72,13 @@
output_filename = f"{request.component.representative.address.path_safe_spec}.resources.jar"
output_files = [output_filename]
+ # #16231: Valid JAR files need the directories of each resource file as well as the files
+ # themselves.
+
+ paths = {Path(filename) for filename in source_files.snapshot.files}
+ directories = {parent for path in paths for parent in path.parents}
+ input_files = {str(path) for path in chain(paths, directories)}
+
resources_jar_input_digest = source_files.snapshot.digest
resources_jar_result = await Get(
ProcessResult,
@@ -78,7 +86,7 @@
argv=[
zip.path,
output_filename,
- *source_files.snapshot.files,
+ *sorted(input_files),
],
description="Build partial JAR containing resources files",
input_digest=resources_jar_input_digest,
| {"golden_diff": "diff --git a/src/python/pants/jvm/resources.py b/src/python/pants/jvm/resources.py\n--- a/src/python/pants/jvm/resources.py\n+++ b/src/python/pants/jvm/resources.py\n@@ -3,6 +3,7 @@\n import itertools\n import logging\n from itertools import chain\n+from pathlib import Path\n \n from pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet\n from pants.core.util_rules import stripped_source_files\n@@ -71,6 +72,13 @@\n output_filename = f\"{request.component.representative.address.path_safe_spec}.resources.jar\"\n output_files = [output_filename]\n \n+ # #16231: Valid JAR files need the directories of each resource file as well as the files\n+ # themselves.\n+\n+ paths = {Path(filename) for filename in source_files.snapshot.files}\n+ directories = {parent for path in paths for parent in path.parents}\n+ input_files = {str(path) for path in chain(paths, directories)}\n+\n resources_jar_input_digest = source_files.snapshot.digest\n resources_jar_result = await Get(\n ProcessResult,\n@@ -78,7 +86,7 @@\n argv=[\n zip.path,\n output_filename,\n- *source_files.snapshot.files,\n+ *sorted(input_files),\n ],\n description=\"Build partial JAR containing resources files\",\n input_digest=resources_jar_input_digest,\n", "issue": "Not able to load resources when using pants vs sbt\n**Describe the bug**\r\nWhen using sbt we are able to call `Thread.currentThread().getContextClassLoader().getResources` and get a list of URLs. When using pants the list is empty. \r\n\r\nThis at the moment limits us from using Flyway with pants.\r\n\r\n**Pants version**\r\n2.13.0a1 and main.\r\n\r\n**OS**\r\nMacOS\r\n\r\n**Additional info**\r\nExample repo to reproduce the issue:\r\nhttps://github.com/somdoron/test-pants-resources\r\n\r\nI think the issue is, that pants only compress files in the resources zip file and not the directories.\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nimport itertools\nimport logging\nfrom itertools import chain\n\nfrom pants.core.target_types import ResourcesFieldSet, ResourcesGeneratorFieldSet\nfrom pants.core.util_rules import stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.core.util_rules.system_binaries import ZipBinary\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.process import Process, ProcessResult\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import SourcesField\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm import compile\nfrom pants.jvm.compile import (\n ClasspathDependenciesRequest,\n ClasspathEntry,\n ClasspathEntryRequest,\n ClasspathEntryRequests,\n CompileResult,\n FallibleClasspathEntries,\n FallibleClasspathEntry,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass JvmResourcesRequest(ClasspathEntryRequest):\n field_sets = (\n ResourcesFieldSet,\n ResourcesGeneratorFieldSet,\n )\n\n\n@rule(desc=\"Assemble resources\")\nasync def assemble_resources_jar(\n zip: ZipBinary,\n request: JvmResourcesRequest,\n) -> FallibleClasspathEntry:\n # Request the component's direct dependency classpath, and additionally any prerequisite.\n # Filter out any dependencies that are generated by our current target so that each resource\n # only appears in a single input JAR.\n # 
NOTE: Generated dependencies will have the same dependencies as the current target, so we\n # don't need to inspect those dependencies.\n optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())]\n fallibles = await MultiGet(\n Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)),\n Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)),\n )\n direct_dependency_classpath_entries = FallibleClasspathEntries(\n itertools.chain(*fallibles)\n ).if_all_succeeded()\n\n if direct_dependency_classpath_entries is None:\n return FallibleClasspathEntry(\n description=str(request.component),\n result=CompileResult.DEPENDENCY_FAILED,\n output=None,\n exit_code=1,\n )\n\n source_files = await Get(\n StrippedSourceFiles,\n SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]),\n )\n\n output_filename = f\"{request.component.representative.address.path_safe_spec}.resources.jar\"\n output_files = [output_filename]\n\n resources_jar_input_digest = source_files.snapshot.digest\n resources_jar_result = await Get(\n ProcessResult,\n Process(\n argv=[\n zip.path,\n output_filename,\n *source_files.snapshot.files,\n ],\n description=\"Build partial JAR containing resources files\",\n input_digest=resources_jar_input_digest,\n output_files=output_files,\n ),\n )\n\n cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, [])\n\n merged_cpe_digest = await Get(\n Digest,\n MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))),\n )\n\n merged_cpe = ClasspathEntry.merge(\n digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries]\n )\n\n return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0)\n\n\ndef rules():\n return [\n *collect_rules(),\n *compile.rules(),\n *stripped_source_files.rules(),\n UnionRule(ClasspathEntryRequest, JvmResourcesRequest),\n ]\n", "path": "src/python/pants/jvm/resources.py"}]} | 1,710 | 307 |
gh_patches_debug_28333 | rasdani/github-patches | git_diff | pypa__pip-2766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version self check should not warn for post releases
Post releases are explicitly designed to just fix small errors that won't affect the code itself, things like doc updates. However if we release a post release then the pip version self check will tell everyone to go download it, even though using it isn't really all that important.
Ideally this should just ignore post releases.
</issue>
<code>
[start of pip/utils/outdated.py]
1 from __future__ import absolute_import
2
3 import datetime
4 import json
5 import logging
6 import os.path
7 import sys
8
9 from pip._vendor import lockfile
10 from pip._vendor import pkg_resources
11
12 from pip.compat import total_seconds
13 from pip.index import PyPI
14 from pip.locations import USER_CACHE_DIR, running_under_virtualenv
15 from pip.utils import ensure_dir
16 from pip.utils.filesystem import check_path_owner
17
18
19 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
20
21
22 logger = logging.getLogger(__name__)
23
24
25 class VirtualenvSelfCheckState(object):
26 def __init__(self):
27 self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
28
29 # Load the existing state
30 try:
31 with open(self.statefile_path) as statefile:
32 self.state = json.load(statefile)
33 except (IOError, ValueError):
34 self.state = {}
35
36 def save(self, pypi_version, current_time):
37 # Attempt to write out our version check file
38 with open(self.statefile_path, "w") as statefile:
39 json.dump(
40 {
41 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
42 "pypi_version": pypi_version,
43 },
44 statefile,
45 sort_keys=True,
46 separators=(",", ":")
47 )
48
49
50 class GlobalSelfCheckState(object):
51 def __init__(self):
52 self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
53
54 # Load the existing state
55 try:
56 with open(self.statefile_path) as statefile:
57 self.state = json.load(statefile)[sys.prefix]
58 except (IOError, ValueError, KeyError):
59 self.state = {}
60
61 def save(self, pypi_version, current_time):
62 # Check to make sure that we own the directory
63 if not check_path_owner(os.path.dirname(self.statefile_path)):
64 return
65
66 # Now that we've ensured the directory is owned by this user, we'll go
67 # ahead and make sure that all our directories are created.
68 ensure_dir(os.path.dirname(self.statefile_path))
69
70 # Attempt to write out our version check file
71 with lockfile.LockFile(self.statefile_path):
72 if os.path.exists(self.statefile_path):
73 with open(self.statefile_path) as statefile:
74 state = json.load(statefile)
75 else:
76 state = {}
77
78 state[sys.prefix] = {
79 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
80 "pypi_version": pypi_version,
81 }
82
83 with open(self.statefile_path, "w") as statefile:
84 json.dump(state, statefile, sort_keys=True,
85 separators=(",", ":"))
86
87
88 def load_selfcheck_statefile():
89 if running_under_virtualenv():
90 return VirtualenvSelfCheckState()
91 else:
92 return GlobalSelfCheckState()
93
94
95 def pip_version_check(session):
96 """Check for an update for pip.
97
98 Limit the frequency of checks to once per week. State is stored either in
99 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
100 of the pip script path.
101 """
102 import pip # imported here to prevent circular imports
103 pypi_version = None
104
105 try:
106 state = load_selfcheck_statefile()
107
108 current_time = datetime.datetime.utcnow()
109 # Determine if we need to refresh the state
110 if "last_check" in state.state and "pypi_version" in state.state:
111 last_check = datetime.datetime.strptime(
112 state.state["last_check"],
113 SELFCHECK_DATE_FMT
114 )
115 if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
116 pypi_version = state.state["pypi_version"]
117
118 # Refresh the version if we need to or just see if we need to warn
119 if pypi_version is None:
120 resp = session.get(
121 PyPI.pip_json_url,
122 headers={"Accept": "application/json"},
123 )
124 resp.raise_for_status()
125 pypi_version = resp.json()["info"]["version"]
126
127 # save that we've performed a check
128 state.save(pypi_version, current_time)
129
130 pip_version = pkg_resources.parse_version(pip.__version__)
131
132 # Determine if our pypi_version is older
133 if pip_version < pkg_resources.parse_version(pypi_version):
134 logger.warning(
135 "You are using pip version %s, however version %s is "
136 "available.\nYou should consider upgrading via the "
137 "'pip install --upgrade pip' command." % (pip.__version__,
138 pypi_version)
139 )
140
141 except Exception:
142 logger.debug(
143 "There was an error checking the latest version of pip",
144 exc_info=True,
145 )
146
[end of pip/utils/outdated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py
--- a/pip/utils/outdated.py
+++ b/pip/utils/outdated.py
@@ -7,7 +7,7 @@
import sys
from pip._vendor import lockfile
-from pip._vendor import pkg_resources
+from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds
from pip.index import PyPI
@@ -122,15 +122,23 @@
headers={"Accept": "application/json"},
)
resp.raise_for_status()
- pypi_version = resp.json()["info"]["version"]
+ pypi_version = [
+ v for v in sorted(
+ list(resp.json()["releases"]),
+ key=packaging_version.parse,
+ )
+ if not packaging_version.parse(v).is_prerelease
+ ][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
- pip_version = pkg_resources.parse_version(pip.__version__)
+ pip_version = packaging_version.parse(pip.__version__)
+ remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
- if pip_version < pkg_resources.parse_version(pypi_version):
+ if (pip_version < remote_version and
+ pip_version.base_version != remote_version.base_version):
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
| {"golden_diff": "diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py\n--- a/pip/utils/outdated.py\n+++ b/pip/utils/outdated.py\n@@ -7,7 +7,7 @@\n import sys\n \n from pip._vendor import lockfile\n-from pip._vendor import pkg_resources\n+from pip._vendor.packaging import version as packaging_version\n \n from pip.compat import total_seconds\n from pip.index import PyPI\n@@ -122,15 +122,23 @@\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n- pypi_version = resp.json()[\"info\"][\"version\"]\n+ pypi_version = [\n+ v for v in sorted(\n+ list(resp.json()[\"releases\"]),\n+ key=packaging_version.parse,\n+ )\n+ if not packaging_version.parse(v).is_prerelease\n+ ][-1]\n \n # save that we've performed a check\n state.save(pypi_version, current_time)\n \n- pip_version = pkg_resources.parse_version(pip.__version__)\n+ pip_version = packaging_version.parse(pip.__version__)\n+ remote_version = packaging_version.parse(pypi_version)\n \n # Determine if our pypi_version is older\n- if pip_version < pkg_resources.parse_version(pypi_version):\n+ if (pip_version < remote_version and\n+ pip_version.base_version != remote_version.base_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n", "issue": "Version self check should not warn for post releases\nPost releases are explicitly designed to just fix small errors that won't affect the code itself, things like doc updates. However if we release a post release then the pip version self check will tell everyone to go download it, even though using it isn't really all that important.\n\nIdeally this should just ignore post releases.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\nfrom pip.utils import ensure_dir\nfrom pip.utils.filesystem import check_path_owner\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Check to make sure that we own the directory\n if not check_path_owner(os.path.dirname(self.statefile_path)):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n 
ensure_dir(os.path.dirname(self.statefile_path))\n\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n if os.path.exists(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n else:\n state = {}\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}]} | 1,968 | 337 |
gh_patches_debug_16100 | rasdani/github-patches | git_diff | dask__distributed-4963 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add bar chart for worker occupancy
We currently have bar charts for worker memory and the number of tasks processing. A similar chart to number of tasks processing would be worker occupancy, which records the sum of the amount of compute time of all tasks currently residing on the worker. This would be exactly like the number of tasks processing chart, but use `ws.occupancy` rather than `len(ws.processing)`
I would have used this today when trying to diagnose work stealing issues.
The relevant code for the number of processing tasks is here. It's made slightly complex because the code for this chart is currently merged with the memory chart. It might make sense to break these apart.
https://github.com/dask/distributed/blob/9d4f0bf2fc804f955a869febd3b51423c4382908/distributed/dashboard/components/scheduler.py#L1017-L1139
</issue>
<code>
[start of distributed/dashboard/scheduler.py]
1 from urllib.parse import urljoin
2
3 from tornado import web
4 from tornado.ioloop import IOLoop
5
6 try:
7 import numpy as np
8 except ImportError:
9 np = False
10
11 from .components.nvml import gpu_doc # noqa: 1708
12 from .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc
13 from .components.scheduler import (
14 AggregateAction,
15 BandwidthTypes,
16 BandwidthWorkers,
17 ComputePerKey,
18 CurrentLoad,
19 MemoryByKey,
20 NBytes,
21 NBytesCluster,
22 SystemMonitor,
23 TaskGraph,
24 TaskProgress,
25 TaskStream,
26 WorkerTable,
27 events_doc,
28 graph_doc,
29 individual_doc,
30 individual_profile_doc,
31 individual_profile_server_doc,
32 profile_doc,
33 profile_server_doc,
34 status_doc,
35 stealing_doc,
36 systemmonitor_doc,
37 tasks_doc,
38 workers_doc,
39 )
40 from .core import BokehApplication
41 from .worker import counters_doc
42
43 template_variables = {
44 "pages": ["status", "workers", "tasks", "system", "profile", "graph", "info"]
45 }
46
47 if NVML_ENABLED:
48 template_variables["pages"].insert(4, "gpu")
49
50
51 def connect(application, http_server, scheduler, prefix=""):
52 bokeh_app = BokehApplication(
53 applications, scheduler, prefix=prefix, template_variables=template_variables
54 )
55 application.add_application(bokeh_app)
56 bokeh_app.initialize(IOLoop.current())
57
58 bokeh_app.add_handlers(
59 r".*",
60 [
61 (
62 r"/",
63 web.RedirectHandler,
64 {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
65 )
66 ],
67 )
68
69
70 applications = {
71 "/system": systemmonitor_doc,
72 "/stealing": stealing_doc,
73 "/workers": workers_doc,
74 "/events": events_doc,
75 "/counters": counters_doc,
76 "/tasks": tasks_doc,
77 "/status": status_doc,
78 "/profile": profile_doc,
79 "/profile-server": profile_server_doc,
80 "/graph": graph_doc,
81 "/gpu": gpu_doc,
82 "/individual-task-stream": individual_doc(
83 TaskStream, 100, n_rectangles=1000, clear_interval="10s"
84 ),
85 "/individual-progress": individual_doc(TaskProgress, 100, height=160),
86 "/individual-graph": individual_doc(TaskGraph, 200),
87 "/individual-nbytes": individual_doc(NBytes, 100),
88 "/individual-nbytes-cluster": individual_doc(NBytesCluster, 100),
89 "/individual-cpu": individual_doc(CurrentLoad, 100, fig_attr="cpu_figure"),
90 "/individual-nprocessing": individual_doc(
91 CurrentLoad, 100, fig_attr="processing_figure"
92 ),
93 "/individual-workers": individual_doc(WorkerTable, 500),
94 "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500),
95 "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500),
96 "/individual-memory-by-key": individual_doc(MemoryByKey, 500),
97 "/individual-compute-time-per-key": individual_doc(ComputePerKey, 500),
98 "/individual-aggregate-time-per-action": individual_doc(AggregateAction, 500),
99 "/individual-scheduler-system": individual_doc(SystemMonitor, 500),
100 "/individual-profile": individual_profile_doc,
101 "/individual-profile-server": individual_profile_server_doc,
102 "/individual-gpu-memory": gpu_memory_doc,
103 "/individual-gpu-utilization": gpu_utilization_doc,
104 }
105
[end of distributed/dashboard/scheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py
--- a/distributed/dashboard/scheduler.py
+++ b/distributed/dashboard/scheduler.py
@@ -19,6 +19,7 @@
MemoryByKey,
NBytes,
NBytesCluster,
+ Occupancy,
SystemMonitor,
TaskGraph,
TaskProgress,
@@ -90,6 +91,7 @@
"/individual-nprocessing": individual_doc(
CurrentLoad, 100, fig_attr="processing_figure"
),
+ "/individual-occupancy": individual_doc(Occupancy, 100),
"/individual-workers": individual_doc(WorkerTable, 500),
"/individual-bandwidth-types": individual_doc(BandwidthTypes, 500),
"/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500),
| {"golden_diff": "diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py\n--- a/distributed/dashboard/scheduler.py\n+++ b/distributed/dashboard/scheduler.py\n@@ -19,6 +19,7 @@\n MemoryByKey,\n NBytes,\n NBytesCluster,\n+ Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskProgress,\n@@ -90,6 +91,7 @@\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n+ \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n", "issue": "Add bar chart for worker occupancy\nWe currently have bar charts for worker memory and the number of tasks processing. A similar chart to number of tasks processing would be worker occupancy, which records the sum of the amount of compute time of all tasks currently residing on the worker. This would be exactly like the number of tasks processing chart, but use `ws.occupancy` rather than `len(ws.processing)`\r\n\r\nI would have used this today when trying to diagnose work stealing issues. \r\n\r\nThe relevant code for the number of processing tasks is here. It's made slightly complex because the code for this chart is currently merged with the memory chart. It might make sense to break these apart.\r\n\r\nhttps://github.com/dask/distributed/blob/9d4f0bf2fc804f955a869febd3b51423c4382908/distributed/dashboard/components/scheduler.py#L1017-L1139\n", "before_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n SystemMonitor,\n TaskGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\"status\", \"workers\", \"tasks\", \"system\", \"profile\", \"graph\", \"info\"]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": 
individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}]} | 1,737 | 191 |
gh_patches_debug_50578 | rasdani/github-patches | git_diff | streamlit__streamlit-4076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Slider value visually overlaps with the edge of the sidebar
# Summary
The value label collides with the edges of the sidebar. It should change alignment when it gets close to the edges.
# Steps to reproduce
```py
min_weight = st.sidebar.slider("Minimum weight", 2500, 6500)
```
## Expected behavior:
The value label should not collide with the edges.
## Actual behavior:
<img width="382" alt="Screen Shot 2020-09-30 at 22 42 44" src="https://user-images.githubusercontent.com/589034/94772484-97067200-036e-11eb-9f82-10453aa7452e.png">
## Is this a regression?
no
# Debug info
- Streamlit version: 0.65.2
- Browser version: Chrome
</issue>
<code>
[start of e2e/scripts/st_slider.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 w1 = st.slider("Label 1", 0, 100, 25, 1)
18 st.write("Value 1:", w1)
19
20 w2 = st.slider("Label 2", 0.0, 100.0, (25.0, 75.0), 0.5)
21 st.write("Value 2:", w2)
22
23 w3 = st.slider(
24 "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label",
25 0,
26 100,
27 1,
28 1,
29 )
30 st.write("Value 3:", w3)
31
32 if st._is_running_with_streamlit:
33
34 def on_change():
35 st.session_state.slider_changed = True
36
37 st.slider(
38 "Label 4",
39 min_value=0,
40 max_value=100,
41 value=25,
42 step=1,
43 key="slider4",
44 on_change=on_change,
45 )
46 st.write("Value 4:", st.session_state.slider4)
47 st.write("Slider changed:", "slider_changed" in st.session_state)
48
[end of e2e/scripts/st_slider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py
--- a/e2e/scripts/st_slider.py
+++ b/e2e/scripts/st_slider.py
@@ -14,6 +14,13 @@
import streamlit as st
+s1 = st.sidebar.slider("Label A", 0, 12345678, 12345678)
+st.sidebar.write("Value A:", s1)
+
+with st.sidebar.expander("Expander"):
+ s2 = st.slider("Label B", 10000, 25000, 10000)
+ st.write("Value B:", s2)
+
w1 = st.slider("Label 1", 0, 100, 25, 1)
st.write("Value 1:", w1)
| {"golden_diff": "diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py\n--- a/e2e/scripts/st_slider.py\n+++ b/e2e/scripts/st_slider.py\n@@ -14,6 +14,13 @@\n \n import streamlit as st\n \n+s1 = st.sidebar.slider(\"Label A\", 0, 12345678, 12345678)\n+st.sidebar.write(\"Value A:\", s1)\n+\n+with st.sidebar.expander(\"Expander\"):\n+ s2 = st.slider(\"Label B\", 10000, 25000, 10000)\n+ st.write(\"Value B:\", s2)\n+\n w1 = st.slider(\"Label 1\", 0, 100, 25, 1)\n st.write(\"Value 1:\", w1)\n", "issue": "Slider value visually overlaps with the edge of the sidebar\n# Summary\r\n\r\nThe value label collides with the edges of the sidebar. It should change alignment when it gets close to the edges. \r\n\r\n# Steps to reproduce\r\n\r\n```py\r\nmin_weight = st.sidebar.slider(\"Minimum weight\", 2500, 6500)\r\n```\r\n\r\n## Expected behavior:\r\n\r\nThe value label should not collide with the edges. \r\n\r\n## Actual behavior:\r\n\r\n<img width=\"382\" alt=\"Screen Shot 2020-09-30 at 22 42 44\" src=\"https://user-images.githubusercontent.com/589034/94772484-97067200-036e-11eb-9f82-10453aa7452e.png\">\r\n\r\n\r\n## Is this a regression?\r\n\r\nno\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.65.2\r\n- Browser version: Chrome\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nw1 = st.slider(\"Label 1\", 0, 100, 25, 1)\nst.write(\"Value 1:\", w1)\n\nw2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\nst.write(\"Value 2:\", w2)\n\nw3 = st.slider(\n \"Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label\",\n 0,\n 100,\n 1,\n 1,\n)\nst.write(\"Value 3:\", w3)\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.slider_changed = True\n\n st.slider(\n \"Label 4\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n key=\"slider4\",\n on_change=on_change,\n )\n st.write(\"Value 4:\", st.session_state.slider4)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_slider.py"}]} | 1,273 | 195 |
gh_patches_debug_2419 | rasdani/github-patches | git_diff | e-valuation__EvaP-1321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Evaluation preview button visibility
As a teaching assistant, I might be a contributor to a given course and therefore get my own feedback in the main evaluation. If that course also has an exam evaluation, I see that listed on my "own evaluations" page with the option to preview the questionnaire. However, as not being responsible, I miss the access rights to preview the linked page, resulting in an error.
I would like to either don't have the preview button (it already knows while rendering that page that I am not a contributor, shown through the corresponding icon next to the exam evaluation title) or to give me the rights to preview the questionnaire.
</issue>
<code>
[start of evap/evaluation/templatetags/evaluation_filters.py]
1 from django.forms import TypedChoiceField
2 from django.template import Library
3
4 from evap.evaluation.models import BASE_UNIPOLAR_CHOICES
5 from evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS
6 from evap.rewards.tools import can_reward_points_be_used_by
7 from evap.student.forms import HeadingField
8
9
10 register = Library()
11
12
13 @register.filter(name='zip')
14 def _zip(a, b):
15 return zip(a, b)
16
17
18 @register.filter
19 def ordering_index(evaluation):
20 if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:
21 return evaluation.days_until_evaluation
22 elif evaluation.state == "in_evaluation":
23 return 100000 + evaluation.days_left_for_evaluation
24 return 200000 + evaluation.days_left_for_evaluation
25
26
27 # from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/
28 @register.filter
29 def percentage(fraction, population):
30 try:
31 return "{0:.0f}%".format(int(float(fraction) / float(population) * 100))
32 except ValueError:
33 return None
34 except ZeroDivisionError:
35 return None
36
37
38 @register.filter
39 def percentage_one_decimal(fraction, population):
40 try:
41 return "{0:.1f}%".format((float(fraction) / float(population)) * 100)
42 except ValueError:
43 return None
44 except ZeroDivisionError:
45 return None
46
47
48 @register.filter
49 def percentage_value(fraction, population):
50 try:
51 return "{0:0f}".format((float(fraction) / float(population)) * 100)
52 except ValueError:
53 return None
54 except ZeroDivisionError:
55 return None
56
57
58 @register.filter
59 def to_colors(choices):
60 if not choices:
61 # When displaying the course distribution, there are no associated voting choices.
62 # In that case, we just use the colors of a unipolar scale.
63 return BASE_UNIPOLAR_CHOICES['colors']
64 return choices.colors
65
66
67 @register.filter
68 def statename(state):
69 return STATES_ORDERED.get(state)
70
71
72 @register.filter
73 def statedescription(state):
74 return STATE_DESCRIPTIONS.get(state)
75
76
77 @register.filter
78 def can_results_page_be_seen_by(evaluation, user):
79 return evaluation.can_results_page_be_seen_by(user)
80
81
82 @register.filter(name='can_reward_points_be_used_by')
83 def _can_reward_points_be_used_by(user):
84 return can_reward_points_be_used_by(user)
85
86
87 @register.filter
88 def is_choice_field(field):
89 return isinstance(field.field, TypedChoiceField)
90
91
92 @register.filter
93 def is_heading_field(field):
94 return isinstance(field.field, HeadingField)
95
96
97 @register.filter
98 def is_user_editor_or_delegate(evaluation, user):
99 return evaluation.is_user_editor_or_delegate(user)
100
101
102 @register.filter
103 def message_class(level):
104 return {
105 'debug': 'info',
106 'info': 'info',
107 'success': 'success',
108 'warning': 'warning',
109 'error': 'danger',
110 }.get(level, 'info')
111
112
113 @register.filter
114 def hours_and_minutes(time_left_for_evaluation):
115 hours = time_left_for_evaluation.seconds // 3600
116 minutes = (time_left_for_evaluation.seconds // 60) % 60
117 return "{:02}:{:02}".format(hours, minutes)
118
119
120 @register.filter
121 def has_nonresponsible_editor(evaluation):
122 return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()
123
[end of evap/evaluation/templatetags/evaluation_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py
--- a/evap/evaluation/templatetags/evaluation_filters.py
+++ b/evap/evaluation/templatetags/evaluation_filters.py
@@ -99,6 +99,10 @@
return evaluation.is_user_editor_or_delegate(user)
[email protected]
+def is_user_responsible_or_contributor_or_delegate(evaluation, user):
+ return evaluation.is_user_responsible_or_contributor_or_delegate(user)
+
@register.filter
def message_class(level):
return {
| {"golden_diff": "diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py\n--- a/evap/evaluation/templatetags/evaluation_filters.py\n+++ b/evap/evaluation/templatetags/evaluation_filters.py\n@@ -99,6 +99,10 @@\n return evaluation.is_user_editor_or_delegate(user)\n \n \[email protected]\n+def is_user_responsible_or_contributor_or_delegate(evaluation, user):\n+ return evaluation.is_user_responsible_or_contributor_or_delegate(user)\n+\n @register.filter\n def message_class(level):\n return {\n", "issue": "Evaluation preview button visibility\nAs a teaching assistant, I might be a contributor to a given course and therefore get my own feedback in the main evaluation. If that course also has an exam evaluation, I see that listed on my \"own evaluations\" page with the option to preview the questionnaire. However, as not being responsible, I miss the access rights to preview the linked page, resulting in an error.\r\n\r\nI would like to either don't have the preview button (it already knows while rendering that page that I am not a contributor, shown through the corresponding icon next to the exam evaluation title) or to give me the rights to preview the questionnaire.\n", "before_files": [{"content": "from django.forms import TypedChoiceField\nfrom django.template import Library\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\nregister = Library()\n\n\[email protected](name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\[email protected]\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\[email protected]\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_value(fraction, population):\n try:\n return \"{0:0f}\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\[email protected]\ndef statename(state):\n return STATES_ORDERED.get(state)\n\n\[email protected]\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\[email protected]\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\[email protected](name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\[email protected]\ndef is_choice_field(field):\n return 
isinstance(field.field, TypedChoiceField)\n\n\[email protected]\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\[email protected]\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\[email protected]\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\[email protected]\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\[email protected]\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py"}]} | 1,726 | 142 |
gh_patches_debug_3654 | rasdani/github-patches | git_diff | spack__spack-2961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fail to build libmng [AutotoolsPackage]
@alalazo I got this error while installing `libmng`:
```
...
==> Already patched libmng
==> Building libmng [AutotoolsPackage]
==> Executing phase : 'autoreconf'
==> Executing phase : 'configure'
==> Error: ProcessError: Command exited with status 1:
'/my/path/spack/var/spack/stage/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu/libmng-2.0.2/configure' '--prefix=/my/path/spack/opt/spack/linux-scientificcernslc6-x86_64/gcc-6.2.0/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu'
/my/path/spack/lib/spack/spack/build_systems/autotools.py:265, in configure:
258 def configure(self, spec, prefix):
259 """Runs configure with the arguments specified in :py:meth:`.configure_args`
260 and an appropriately set prefix.
261 """
262 options = ['--prefix={0}'.format(prefix)] + self.configure_args()
263
264 with working_dir(self.build_directory, create=True)
```
And this is the spack-build.out:
```
...
checking for a BSD-compatible install... /usr/bin/install -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... /bin/mkdir -p
checking for gawk... gawk
checking whether make sets $(MAKE)... yes
checking whether make supports nested variables... yes
configure: error: source directory already configured; run "make distclean" there first
```
Before #2859 was merged I could install it correctly, but now I get this error. Do you think it could be related to that commit, or do I have to install it in a different way now?
</issue>
<code>
[start of var/spack/repos/builtin/packages/libmng/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Libmng(AutotoolsPackage):
29 """libmng -THE reference library for reading, displaying, writing
30 and examining Multiple-Image Network Graphics. MNG is the animation
31 extension to the popular PNG image-format."""
32 homepage = "http://sourceforge.net/projects/libmng/"
33 url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"
34
35 version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')
36
37 depends_on("jpeg")
38 depends_on("zlib")
39 depends_on("lcms")
40
41 def patch(self):
42 # jpeg requires stdio to beincluded before its headrs.
43 filter_file(r'^(\#include \<jpeglib\.h\>)',
44 '#include<stdio.h>\n\\1', 'libmng_types.h')
45
[end of var/spack/repos/builtin/packages/libmng/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/libmng/package.py b/var/spack/repos/builtin/packages/libmng/package.py
--- a/var/spack/repos/builtin/packages/libmng/package.py
+++ b/var/spack/repos/builtin/packages/libmng/package.py
@@ -42,3 +42,7 @@
# jpeg requires stdio to beincluded before its headrs.
filter_file(r'^(\#include \<jpeglib\.h\>)',
'#include<stdio.h>\n\\1', 'libmng_types.h')
+
+ @run_before('configure')
+ def clean_configure_directory(self):
+ make('distclean')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libmng/package.py b/var/spack/repos/builtin/packages/libmng/package.py\n--- a/var/spack/repos/builtin/packages/libmng/package.py\n+++ b/var/spack/repos/builtin/packages/libmng/package.py\n@@ -42,3 +42,7 @@\n # jpeg requires stdio to beincluded before its headrs.\n filter_file(r'^(\\#include \\<jpeglib\\.h\\>)',\n '#include<stdio.h>\\n\\\\1', 'libmng_types.h')\n+\n+ @run_before('configure')\n+ def clean_configure_directory(self):\n+ make('distclean')\n", "issue": "Fail to build libmng [AutotoolsPackage]\n@alalazo I got this error while installing `libmng`:\r\n\r\n```\r\n...\r\n==> Already patched libmng\r\n==> Building libmng [AutotoolsPackage]\r\n==> Executing phase : 'autoreconf'\r\n==> Executing phase : 'configure'\r\n==> Error: ProcessError: Command exited with status 1:\r\n '/my/path/spack/var/spack/stage/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu/libmng-2.0.2/configure' '--prefix=/my/path/spack/opt/spack/linux-scientificcernslc6-x86_64/gcc-6.2.0/libmng-2.0.2-2x5fkukzf3sf4uexegr3n35jwmy5pclu'\r\n/my/path/spack/lib/spack/spack/build_systems/autotools.py:265, in configure:\r\n 258 def configure(self, spec, prefix):\r\n 259 \"\"\"Runs configure with the arguments specified in :py:meth:`.configure_args`\r\n 260 and an appropriately set prefix.\r\n 261 \"\"\"\r\n 262 options = ['--prefix={0}'.format(prefix)] + self.configure_args()\r\n 263 \r\n 264 with working_dir(self.build_directory, create=True)\r\n```\r\n\r\nAnd this is the spack-build.out:\r\n\r\n```\r\n...\r\nchecking for a BSD-compatible install... /usr/bin/install -c\r\nchecking whether build environment is sane... yes\r\nchecking for a thread-safe mkdir -p... /bin/mkdir -p\r\nchecking for gawk... gawk\r\nchecking whether make sets $(MAKE)... yes\r\nchecking whether make supports nested variables... yes\r\nconfigure: error: source directory already configured; run \"make distclean\" there first\r\n```\r\n\r\nBefore merge #2859 I could install it correctly but now I get this, do you think that could be related with the commit or maybe I have to install it in a different way now?\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libmng(AutotoolsPackage):\n \"\"\"libmng -THE reference library for reading, displaying, writing\n and examining Multiple-Image Network Graphics. MNG is the animation\n extension to the popular PNG image-format.\"\"\"\n homepage = \"http://sourceforge.net/projects/libmng/\"\n url = \"http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz\"\n\n version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')\n\n depends_on(\"jpeg\")\n depends_on(\"zlib\")\n depends_on(\"lcms\")\n\n def patch(self):\n # jpeg requires stdio to beincluded before its headrs.\n filter_file(r'^(\\#include \\<jpeglib\\.h\\>)',\n '#include<stdio.h>\\n\\\\1', 'libmng_types.h')\n", "path": "var/spack/repos/builtin/packages/libmng/package.py"}]} | 1,596 | 144 |
gh_patches_debug_66276 | rasdani/github-patches | git_diff | python-poetry__poetry-979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--no-root behavior is inverted on latest develop
[This](https://github.com/sdispater/poetry/commit/37ec1447b3508ee0bbdb41f8e5773ed5bfae0654#diff-427299ba040b8502b4d29846e595c2d0R59) should probably be `if self.option("no-root")`, to _not_ install the root package when `--no-root` is provided.
</issue>
<code>
[start of poetry/console/commands/install.py]
1 import os
2
3 from .env_command import EnvCommand
4
5
6 class InstallCommand(EnvCommand):
7 """
8 Installs the project dependencies.
9
10 install
11 { --no-dev : Do not install dev dependencies. }
12 { --no-root : Do not install the root package (your project). }
13 { --dry-run : Outputs the operations but will not execute anything
14 (implicitly enables --verbose). }
15 { --E|extras=* : Extra sets of dependencies to install. }
16 { --develop=* : Install given packages in development mode. }
17 """
18
19 help = """The <info>install</info> command reads the <comment>poetry.lock</> file from
20 the current directory, processes it, and downloads and installs all the
21 libraries and dependencies outlined in that file. If the file does not
22 exist it will look for <comment>pyproject.toml</> and do the same.
23
24 <info>poetry install</info>
25 """
26
27 _loggers = ["poetry.repositories.pypi_repository"]
28
29 def handle(self):
30 from clikit.io import NullIO
31 from poetry.installation import Installer
32 from poetry.masonry.builders import SdistBuilder
33 from poetry.masonry.utils.module import ModuleOrPackageNotFound
34 from poetry.utils._compat import decode
35 from poetry.utils.env import NullEnv
36
37 installer = Installer(
38 self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool
39 )
40
41 extras = []
42 for extra in self.option("extras"):
43 if " " in extra:
44 extras += [e.strip() for e in extra.split(" ")]
45 else:
46 extras.append(extra)
47
48 installer.extras(extras)
49 installer.dev_mode(not self.option("no-dev"))
50 installer.develop(self.option("develop"))
51 installer.dry_run(self.option("dry-run"))
52 installer.verbose(self.option("verbose"))
53
54 return_code = installer.run()
55
56 if return_code != 0:
57 return return_code
58
59 if not self.option("no-root"):
60 return 0
61
62 try:
63 builder = SdistBuilder(self.poetry, NullEnv(), NullIO())
64 except ModuleOrPackageNotFound:
65 # This is likely due to the fact that the project is an application
66 # not following the structure expected by Poetry
67 # If this is a true error it will be picked up later by build anyway.
68 return 0
69
70 self.line(
71 " - Installing <info>{}</info> (<comment>{}</comment>)".format(
72 self.poetry.package.pretty_name, self.poetry.package.pretty_version
73 )
74 )
75
76 if self.option("dry-run"):
77 return 0
78
79 setup = self.poetry.file.parent / "setup.py"
80 has_setup = setup.exists()
81
82 if has_setup:
83 self.line("<warning>A setup.py file already exists. Using it.</warning>")
84 else:
85 with setup.open("w", encoding="utf-8") as f:
86 f.write(decode(builder.build_setup()))
87
88 try:
89 self.env.run("pip", "install", "-e", str(setup.parent), "--no-deps")
90 finally:
91 if not has_setup:
92 os.remove(str(setup))
93
[end of poetry/console/commands/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py
--- a/poetry/console/commands/install.py
+++ b/poetry/console/commands/install.py
@@ -56,7 +56,7 @@
if return_code != 0:
return return_code
- if not self.option("no-root"):
+ if self.option("no-root"):
return 0
try:
| {"golden_diff": "diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py\n--- a/poetry/console/commands/install.py\n+++ b/poetry/console/commands/install.py\n@@ -56,7 +56,7 @@\n if return_code != 0:\n return return_code\n \n- if not self.option(\"no-root\"):\n+ if self.option(\"no-root\"):\n return 0\n \n try:\n", "issue": "--no-root behavior is inverted on latest develop\n[This](https://github.com/sdispater/poetry/commit/37ec1447b3508ee0bbdb41f8e5773ed5bfae0654#diff-427299ba040b8502b4d29846e595c2d0R59) should probably be `if self.option(\"no-root\")`, to _not_ install the root package when `--no-root` is provided.\n", "before_files": [{"content": "import os\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --no-root : Do not install the root package (your project). }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation import Installer\n from poetry.masonry.builders import SdistBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n from poetry.utils._compat import decode\n from poetry.utils.env import NullEnv\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if not self.option(\"no-root\"):\n return 0\n\n try:\n builder = SdistBuilder(self.poetry, NullEnv(), NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <info>{}</info> (<comment>{}</comment>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n setup = self.poetry.file.parent / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self.line(\"<warning>A setup.py file already exists. Using it.</warning>\")\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n self.env.run(\"pip\", \"install\", \"-e\", str(setup.parent), \"--no-deps\")\n finally:\n if not has_setup:\n os.remove(str(setup))\n", "path": "poetry/console/commands/install.py"}]} | 1,534 | 97 |
gh_patches_debug_31555 | rasdani/github-patches | git_diff | chainer__chainer-970 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SplitAxis should return a tuple with a single element
`Function.__call__` returns a `Variable` when its forward function returns only one value. When the number of return values depends on the input values, as with `split_axis`, a user needs to check the type of the return value of `Function` and write complicated code to handle both cases.
I have two design choices:
- Only `SplitAxis` always returns a tuple of `Variable`s
- Add keyword argument `force_tuple` to `Function.__call__`
</issue>
<code>
[start of chainer/functions/array/split_axis.py]
1 import collections
2
3 import six
4
5 from chainer import cuda
6 from chainer import function
7 from chainer.utils import type_check
8
9
10 class SplitAxis(function.Function):
11
12 """Function that splits multiple arrays towards the specified axis."""
13
14 def __init__(self, indices_or_sections, axis):
15 if not isinstance(indices_or_sections, (int, collections.Iterable)):
16 raise TypeError('indices_or_sections must be integer or 1-D array')
17 self.indices_or_sections = indices_or_sections
18 self.axis = axis
19
20 def check_type_forward(self, in_types):
21 type_check.expect(in_types.size() == 1)
22 type_check.expect(in_types[0].ndim > self.axis)
23
24 if isinstance(self.indices_or_sections, collections.Iterable):
25 max_index = type_check.Variable(
26 self.indices_or_sections[-1], 'max_index')
27 type_check.expect(in_types[0].shape[self.axis] > max_index)
28 else:
29 sections = type_check.Variable(
30 self.indices_or_sections, 'sections')
31 type_check.expect(in_types[0].shape[self.axis] % sections == 0)
32
33 def forward(self, x):
34 if isinstance(self.indices_or_sections, collections.Iterable):
35 cdimx = x[0].shape[self.axis]
36 ind = list(self.indices_or_sections)
37 ind.append(cdimx)
38 prev_i = 0
39 for i in ind:
40 cdimy = max(0, min(i, cdimx) - prev_i)
41 if cdimy == 0:
42 raise ValueError('Not support if shape contains 0')
43 prev_i = i
44 xp = cuda.get_array_module(*x)
45 return tuple(xp.split(x[0], self.indices_or_sections, self.axis))
46
47 def backward(self, x, gys):
48 xp = cuda.get_array_module(*x)
49 if any(gy is None for gy in gys):
50 gx = xp.zeros_like(x[0])
51 gxs = xp.split(gx, self.indices_or_sections, self.axis)
52 for gxi, gy in six.moves.zip(gxs, gys):
53 if gy is None:
54 continue
55 gxi[:] = gy
56 return gx,
57 else:
58 return xp.concatenate(gys, axis=self.axis),
59
60
61 def split_axis(x, indices_or_sections, axis):
62 """Splits given variables along an axis.
63
64 Args:
65 x (tuple of Variables): Variables to be split.
66 indices_or_sections (int or 1-D array): If this argument is an integer,
67 N, the array will be divided into N equal arrays along axis.
68 If it is a 1-D array of sorted integers, it
69 indicates the positions where the array is split.
70 axis (int): Axis that the input array is split along.
71
72 Returns:
73 tuple or Variable: Tuple of :class:`~chainer.Variable` objects
74 if the number of outputs is more than 1 or
75 :class:`~chainer.Variable` otherwise.
76
77 .. note::
78 This function raises :class:`ValueError` if at least
79 one of the outputs is split to zero-size
80 (i.e. `axis`-th value of its shape is zero).
81
82 """
83 return SplitAxis(indices_or_sections, axis)(x)
84
[end of chainer/functions/array/split_axis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/array/split_axis.py b/chainer/functions/array/split_axis.py
--- a/chainer/functions/array/split_axis.py
+++ b/chainer/functions/array/split_axis.py
@@ -2,6 +2,7 @@
import six
+import chainer
from chainer import cuda
from chainer import function
from chainer.utils import type_check
@@ -58,7 +59,7 @@
return xp.concatenate(gys, axis=self.axis),
-def split_axis(x, indices_or_sections, axis):
+def split_axis(x, indices_or_sections, axis, force_tuple=False):
"""Splits given variables along an axis.
Args:
@@ -68,16 +69,23 @@
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
+ force_tuple (bool): If ``True``, this method returns a tuple even when
+ the number of outputs is one.
Returns:
tuple or Variable: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
+ When ``force_tuple`` is ``True``, returned value is always a tuple
+ regardless of the number of outputs.
.. note::
This function raises :class:`ValueError` if at least
one of the outputs is split to zero-size
- (i.e. `axis`-th value of its shape is zero).
+ (i.e. ``axis``-th value of its shape is zero).
"""
- return SplitAxis(indices_or_sections, axis)(x)
+ res = SplitAxis(indices_or_sections, axis)(x)
+ if force_tuple and isinstance(res, chainer.Variable):
+ res = (res,)
+ return res
| {"golden_diff": "diff --git a/chainer/functions/array/split_axis.py b/chainer/functions/array/split_axis.py\n--- a/chainer/functions/array/split_axis.py\n+++ b/chainer/functions/array/split_axis.py\n@@ -2,6 +2,7 @@\n \n import six\n \n+import chainer\n from chainer import cuda\n from chainer import function\n from chainer.utils import type_check\n@@ -58,7 +59,7 @@\n return xp.concatenate(gys, axis=self.axis),\n \n \n-def split_axis(x, indices_or_sections, axis):\n+def split_axis(x, indices_or_sections, axis, force_tuple=False):\n \"\"\"Splits given variables along an axis.\n \n Args:\n@@ -68,16 +69,23 @@\n If it is a 1-D array of sorted integers, it\n indicates the positions where the array is split.\n axis (int): Axis that the input array is split along.\n+ force_tuple (bool): If ``True``, this method returns a tuple even when\n+ the number of outputs is one.\n \n Returns:\n tuple or Variable: Tuple of :class:`~chainer.Variable` objects\n if the number of outputs is more than 1 or\n :class:`~chainer.Variable` otherwise.\n+ When ``force_tuple`` is ``True``, returned value is always a tuple\n+ regardless of the number of outputs.\n \n .. note::\n This function raises :class:`ValueError` if at least\n one of the outputs is split to zero-size\n- (i.e. `axis`-th value of its shape is zero).\n+ (i.e. ``axis``-th value of its shape is zero).\n \n \"\"\"\n- return SplitAxis(indices_or_sections, axis)(x)\n+ res = SplitAxis(indices_or_sections, axis)(x)\n+ if force_tuple and isinstance(res, chainer.Variable):\n+ res = (res,)\n+ return res\n", "issue": "SplitAxis should return a tuple with a single element\n`Function.__call__` returns a `Variable` when its forward function returns only one return value. When the number of return values depends on input values, such as `split_axis`, a user need to check the type of the return value of `Function`. 
A user needs to write a complicated source code.\n\nI have two design choice:\n- Only `SplitAxis` always returns a tuple of `Variable`s\n- Add keyword argument `force_tuple` to `Function.__call__`\n\n", "before_files": [{"content": "import collections\n\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass SplitAxis(function.Function):\n\n \"\"\"Function that splits multiple arrays towards the specified axis.\"\"\"\n\n def __init__(self, indices_or_sections, axis):\n if not isinstance(indices_or_sections, (int, collections.Iterable)):\n raise TypeError('indices_or_sections must be integer or 1-D array')\n self.indices_or_sections = indices_or_sections\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].ndim > self.axis)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n max_index = type_check.Variable(\n self.indices_or_sections[-1], 'max_index')\n type_check.expect(in_types[0].shape[self.axis] > max_index)\n else:\n sections = type_check.Variable(\n self.indices_or_sections, 'sections')\n type_check.expect(in_types[0].shape[self.axis] % sections == 0)\n\n def forward(self, x):\n if isinstance(self.indices_or_sections, collections.Iterable):\n cdimx = x[0].shape[self.axis]\n ind = list(self.indices_or_sections)\n ind.append(cdimx)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, cdimx) - prev_i)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n prev_i = i\n xp = cuda.get_array_module(*x)\n return tuple(xp.split(x[0], self.indices_or_sections, self.axis))\n\n def backward(self, x, gys):\n xp = cuda.get_array_module(*x)\n if any(gy is None for gy in gys):\n gx = xp.zeros_like(x[0])\n gxs = xp.split(gx, self.indices_or_sections, self.axis)\n for gxi, gy in six.moves.zip(gxs, gys):\n if gy is None:\n continue\n gxi[:] = gy\n return gx,\n else:\n return xp.concatenate(gys, axis=self.axis),\n\n\ndef split_axis(x, indices_or_sections, axis):\n \"\"\"Splits given variables along an axis.\n\n Args:\n x (tuple of Variables): Variables to be split.\n indices_or_sections (int or 1-D array): If this argument is an integer,\n N, the array will be divided into N equal arrays along axis.\n If it is a 1-D array of sorted integers, it\n indicates the positions where the array is split.\n axis (int): Axis that the input array is split along.\n\n Returns:\n tuple or Variable: Tuple of :class:`~chainer.Variable` objects\n if the number of outputs is more than 1 or\n :class:`~chainer.Variable` otherwise.\n\n .. note::\n This function raises :class:`ValueError` if at least\n one of the outputs is split to zero-size\n (i.e. `axis`-th value of its shape is zero).\n\n \"\"\"\n return SplitAxis(indices_or_sections, axis)(x)\n", "path": "chainer/functions/array/split_axis.py"}]} | 1,513 | 421 |
gh_patches_debug_920 | rasdani/github-patches | git_diff | speechbrain__speechbrain-1127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken docs for `speechbrain.alignment.ctc_segmentation`
Hi, thanks for maintaining such a wonderful library.
Looks like the documentation for `speechbrain.alignment.ctc_segmentation` is broken:
https://speechbrain.readthedocs.io/en/latest/API/speechbrain.alignment.ctc_segmentation.html
I guess this is caused by unneeded shebang, as shown in the following:
https://github.com/speechbrain/speechbrain/blob/develop/speechbrain/alignment/ctc_segmentation.py#L1-L2
Perhaps this could be related to #819 ?
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import sys
15 import hyperpyyaml
16
17
18 sys.path.insert(0, os.path.abspath("../speechbrain"))
19
20
21 # -- Project information -----------------------------------------------------
22
23 project = "SpeechBrain"
24 copyright = "2021, SpeechBrain"
25 author = "SpeechBrain"
26
27 # The full version, including alpha/beta/rc tags
28 release = "0.5.0"
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.intersphinx",
39 "sphinx.ext.mathjax",
40 "sphinx.ext.viewcode",
41 "sphinx.ext.autosummary",
42 "sphinx.ext.napoleon",
43 "recommonmark",
44 ]
45
46
47 # Napoleon settings
48 napoleon_google_docstring = False
49 napoleon_numpy_docstring = True
50 napoleon_include_init_with_doc = True
51 napoleon_include_private_with_doc = False
52 napoleon_include_special_with_doc = True
53 napoleon_use_admonition_for_examples = False
54 napoleon_use_admonition_for_notes = True
55 napoleon_use_admonition_for_references = False
56 napoleon_use_ivar = False
57 napoleon_use_param = True
58 napoleon_use_rtype = True
59
60 # Intersphinx mapping:
61 intersphinx_mapping = {
62 "python": ("https://docs.python.org/", None),
63 "numpy": ("http://docs.scipy.org/doc/numpy/", None),
64 "torch": ("https://pytorch.org/docs/master/", None),
65 }
66
67 # AUTODOC:
68
69 autodoc_default_options = {}
70
71 # Autodoc mock extra dependencies:
72 autodoc_mock_imports = ["numba", "sklearn"]
73
74 # Order of API items:
75 autodoc_member_order = "bysource"
76 autodoc_default_options = {"member-order": "bysource"}
77
78 # Don't show inherited docstrings:
79 autodoc_inherit_docstrings = False
80
81 # Add any paths that contain templates here, relative to this directory.
82 templates_path = ["_templates"]
83
84 # List of patterns, relative to source directory, that match files and
85 # directories to ignore when looking for source files.
86 # This pattern also affects html_static_path and html_extra_path.
87 exclude_patterns = ["_apidoc_templates"]
88
89 # -- Better apidoc -----------------------------------------------------------
90
91
92 def run_apidoc(app):
93 """Generage API documentation"""
94 import better_apidoc
95
96 better_apidoc.APP = app
97
98 better_apidoc.main(
99 [
100 "better-apidoc",
101 "-t",
102 "_apidoc_templates",
103 "--force",
104 "--no-toc",
105 "--separate",
106 "-o",
107 "API",
108 os.path.dirname(hyperpyyaml.__file__),
109 ]
110 )
111 better_apidoc.main(
112 [
113 "better-apidoc",
114 "-t",
115 "_apidoc_templates",
116 "--force",
117 "--no-toc",
118 "--separate",
119 "-o",
120 "API",
121 os.path.join("../", "speechbrain"),
122 ]
123 )
124
125
126 # -- Options for HTML output -------------------------------------------------
127
128 # The theme to use for HTML and HTML Help pages. See the documentation for
129 # a list of builtin themes.
130 #
131 html_theme = "sphinx_rtd_theme"
132 # See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
133 # for rtd theme options
134 html_theme_options = {
135 # Toc options
136 "collapse_navigation": False,
137 "sticky_navigation": True,
138 "navigation_depth": 4,
139 "includehidden": True,
140 }
141
142
143 # Add any paths that contain custom static files (such as style sheets) here,
144 # relative to this directory. They are copied after the builtin static files,
145 # so a file named "default.css" will overwrite the builtin "default.css".
146 html_static_path = ["_static"]
147
148 source_suffix = {
149 ".rst": "restructuredtext",
150 ".txt": "markdown",
151 ".md": "markdown",
152 }
153
154
155 def setup(app):
156 app.connect("builder-inited", run_apidoc)
157
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -69,7 +69,7 @@
autodoc_default_options = {}
# Autodoc mock extra dependencies:
-autodoc_mock_imports = ["numba", "sklearn"]
+autodoc_mock_imports = ["sklearn"]
# Order of API items:
autodoc_member_order = "bysource"
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -69,7 +69,7 @@\n autodoc_default_options = {}\n \n # Autodoc mock extra dependencies:\n-autodoc_mock_imports = [\"numba\", \"sklearn\"]\n+autodoc_mock_imports = [\"sklearn\"]\n \n # Order of API items:\n autodoc_member_order = \"bysource\"\n", "issue": "Broken docs for `speechbrain.alignment.ctc_segmentation`\nHi, thanks for maintaining such a wonderful library.\r\n\r\nLooks like the documentation for `speechbrain.alignment.ctc_segmentation` is broken:\r\nhttps://speechbrain.readthedocs.io/en/latest/API/speechbrain.alignment.ctc_segmentation.html\r\n\r\nI guess this is caused by unneeded shebang, as shown in the following:\r\nhttps://github.com/speechbrain/speechbrain/blob/develop/speechbrain/alignment/ctc_segmentation.py#L1-L2\r\n\r\nPerhaps this could be related to #819 ?\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport hyperpyyaml\n\n\nsys.path.insert(0, os.path.abspath(\"../speechbrain\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"SpeechBrain\"\ncopyright = \"2021, SpeechBrain\"\nauthor = \"SpeechBrain\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"0.5.0\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"recommonmark\",\n]\n\n\n# Napoleon settings\nnapoleon_google_docstring = False\nnapoleon_numpy_docstring = True\nnapoleon_include_init_with_doc = True\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = True\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = True\nnapoleon_use_admonition_for_references = False\nnapoleon_use_ivar = False\nnapoleon_use_param = True\nnapoleon_use_rtype = True\n\n# Intersphinx mapping:\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/\", None),\n \"numpy\": (\"http://docs.scipy.org/doc/numpy/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n}\n\n# AUTODOC:\n\nautodoc_default_options = {}\n\n# Autodoc mock extra dependencies:\nautodoc_mock_imports = [\"numba\", \"sklearn\"]\n\n# Order of API items:\nautodoc_member_order = \"bysource\"\nautodoc_default_options = {\"member-order\": \"bysource\"}\n\n# Don't show inherited docstrings:\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_apidoc_templates\"]\n\n# -- Better apidoc -----------------------------------------------------------\n\n\ndef run_apidoc(app):\n \"\"\"Generage API documentation\"\"\"\n import better_apidoc\n\n better_apidoc.APP = app\n\n better_apidoc.main(\n [\n \"better-apidoc\",\n \"-t\",\n \"_apidoc_templates\",\n \"--force\",\n \"--no-toc\",\n \"--separate\",\n \"-o\",\n \"API\",\n os.path.dirname(hyperpyyaml.__file__),\n ]\n )\n better_apidoc.main(\n [\n \"better-apidoc\",\n \"-t\",\n \"_apidoc_templates\",\n \"--force\",\n \"--no-toc\",\n \"--separate\",\n \"-o\",\n \"API\",\n os.path.join(\"../\", \"speechbrain\"),\n ]\n )\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n# See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html\n# for rtd theme options\nhtml_theme_options = {\n # Toc options\n \"collapse_navigation\": False,\n \"sticky_navigation\": True,\n \"navigation_depth\": 4,\n \"includehidden\": True,\n}\n\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"markdown\",\n \".md\": \"markdown\",\n}\n\n\ndef setup(app):\n app.connect(\"builder-inited\", run_apidoc)\n", "path": "docs/conf.py"}]} | 2,026 | 98 |
gh_patches_debug_6953 | rasdani/github-patches | git_diff | svthalia__concrexit-2526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Vacancies without partner page on homepage lead to 404 errors
### Describe the bug
The vacancy view on the homepage leads to a 404 for vacancies without partner pages, as it tries to go to that page anyway.
### How to reproduce
Steps to reproduce the behaviour:
1. Create an inactive partner
2. Create a vacancy with them
3. Make it appear on the homepage
4. Click it
### Expected behaviour
Going to the vacancy in some way, e.g. on the general vacancy page.
### Screenshots
If applicable, add screenshots to help explain your problem.
### Additional context
I think I sort of fixed this for the main vacancy list, so we can probably share the code there. (Although I believe that that is currently broken too.)
</issue>
<code>
[start of website/partners/templatetags/frontpage_vacancies.py]
1 from django import template
2 from django.urls import reverse
3
4 from partners.models import Vacancy
5
6 register = template.Library()
7
8
9 @register.inclusion_tag("partners/frontpage_vacancies.html")
10 def render_frontpage_vacancies():
11 vacancies = []
12
13 for vacancy in Vacancy.objects.order_by("?")[:6]:
14 url = "{}#vacancy-{}".format(reverse("partners:vacancies"), vacancy.id)
15 if vacancy.partner:
16 url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id)
17
18 vacancies.append(
19 {
20 "title": vacancy.title,
21 "company_name": vacancy.get_company_name(),
22 "url": url,
23 }
24 )
25
26 return {"vacancies": vacancies}
27
[end of website/partners/templatetags/frontpage_vacancies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/partners/templatetags/frontpage_vacancies.py b/website/partners/templatetags/frontpage_vacancies.py
--- a/website/partners/templatetags/frontpage_vacancies.py
+++ b/website/partners/templatetags/frontpage_vacancies.py
@@ -12,7 +12,7 @@
for vacancy in Vacancy.objects.order_by("?")[:6]:
url = "{}#vacancy-{}".format(reverse("partners:vacancies"), vacancy.id)
- if vacancy.partner:
+ if vacancy.partner and vacancy.partner.is_active:
url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id)
vacancies.append(
| {"golden_diff": "diff --git a/website/partners/templatetags/frontpage_vacancies.py b/website/partners/templatetags/frontpage_vacancies.py\n--- a/website/partners/templatetags/frontpage_vacancies.py\n+++ b/website/partners/templatetags/frontpage_vacancies.py\n@@ -12,7 +12,7 @@\n \n for vacancy in Vacancy.objects.order_by(\"?\")[:6]:\n url = \"{}#vacancy-{}\".format(reverse(\"partners:vacancies\"), vacancy.id)\n- if vacancy.partner:\n+ if vacancy.partner and vacancy.partner.is_active:\n url = \"{}#vacancy-{}\".format(vacancy.partner.get_absolute_url(), vacancy.id)\n \n vacancies.append(\n", "issue": "Vacancies without partner page on homepage lead to 404 errors\n### Describe the bug\r\nThe vacancy view on the homepage leads to a 404 for vacancies without partner pages, as it tries to go to that page anyway.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Create an inactive partner\r\n2. Create a vacancy with them\r\n3. Make it appear on the homepage\r\n4. Click it\r\n\r\n### Expected behaviour\r\nGoing to the vacancy in some way, e.g. on the general vacancy page.\r\n\r\n### Screenshots\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Additional context\r\nI think I sort of fixed this for the main vacancy list, so we can probably share the code there. (Although I believe that that is currently broken too.)\r\n\n", "before_files": [{"content": "from django import template\nfrom django.urls import reverse\n\nfrom partners.models import Vacancy\n\nregister = template.Library()\n\n\[email protected]_tag(\"partners/frontpage_vacancies.html\")\ndef render_frontpage_vacancies():\n vacancies = []\n\n for vacancy in Vacancy.objects.order_by(\"?\")[:6]:\n url = \"{}#vacancy-{}\".format(reverse(\"partners:vacancies\"), vacancy.id)\n if vacancy.partner:\n url = \"{}#vacancy-{}\".format(vacancy.partner.get_absolute_url(), vacancy.id)\n\n vacancies.append(\n {\n \"title\": vacancy.title,\n \"company_name\": vacancy.get_company_name(),\n \"url\": url,\n }\n )\n\n return {\"vacancies\": vacancies}\n", "path": "website/partners/templatetags/frontpage_vacancies.py"}]} | 920 | 163 |
gh_patches_debug_38534 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider sunoco is broken
During the global build at 2021-07-21-14-42-39, spider **sunoco** failed with **0 features** and **1 error**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/sunoco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson))
</issue>
<code>
[start of locations/spiders/sunoco.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6 from locations.hours import OpeningHours
7
8
9 class SunocoSpider(scrapy.Spider):
10 name = "sunoco"
11 item_attributes = {'brand': "Sunoco", 'brand_wikidata': "Q1423218"}
12 allowed_domains = ["sunoco.com"]
13
14 start_urls = ['https://www.sunoco.com/js/locations.json']
15
16 def parse(self, response):
17 for location in json.loads(response.body_as_unicode()):
18 opening_hours = OpeningHours()
19
20 for key in [
21 'Hrs of Operation Mon-Sat Open',
22 'Hrs of Operation Mon-Sat Close',
23 'Hrs of Operation Sun Open',
24 'Hrs of Operation Sun Close'
25 ]:
26 if location[key] >= 2400:
27 location[key] -= 2400
28
29 for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:
30 opening_hours.add_range(day=day,
31 open_time=f"{location['Hrs of Operation Mon-Sat Open']:04d}",
32 close_time=f"{location['Hrs of Operation Mon-Sat Close']:04d}",
33 time_format='%H%M')
34
35 opening_hours.add_range(day='Su',
36 open_time=f"{location['Hrs of Operation Sun Open']:04d}",
37 close_time=f"{location['Hrs of Operation Sun Close']:04d}",
38 time_format='%H%M')
39
40 yield GeojsonPointItem(
41 ref=location['Facility ID'],
42 lon=location['Longitude'],
43 lat=location['Latitude'],
44 # name as shown on the Sunoco site
45 name=f"Sunoco #{location['Facility ID']}",
46 addr_full=location['Address'],
47 city=location['City'],
48 state=location['State'],
49 postcode=location['Zip'],
50 country='US',
51 phone=location['Phone'],
52 opening_hours=opening_hours.as_opening_hours(),
53 extras={
54 'amenity:fuel': True,
55 'atm': int(location['ATM'] or 0) == 1,
56 'car_wash': int(location['Car Wash'] or 0) == 1,
57 'fuel:diesel': int(location['Diesel'] or 0) == 1,
58 'fuel:kerosene': int(location['Kerosene'] or 0) == 1
59 }
60 )
61
[end of locations/spiders/sunoco.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/sunoco.py b/locations/spiders/sunoco.py
--- a/locations/spiders/sunoco.py
+++ b/locations/spiders/sunoco.py
@@ -17,44 +17,34 @@
for location in json.loads(response.body_as_unicode()):
opening_hours = OpeningHours()
- for key in [
- 'Hrs of Operation Mon-Sat Open',
- 'Hrs of Operation Mon-Sat Close',
- 'Hrs of Operation Sun Open',
- 'Hrs of Operation Sun Close'
- ]:
- if location[key] >= 2400:
- location[key] -= 2400
-
- for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:
- opening_hours.add_range(day=day,
- open_time=f"{location['Hrs of Operation Mon-Sat Open']:04d}",
- close_time=f"{location['Hrs of Operation Mon-Sat Close']:04d}",
- time_format='%H%M')
-
- opening_hours.add_range(day='Su',
- open_time=f"{location['Hrs of Operation Sun Open']:04d}",
- close_time=f"{location['Hrs of Operation Sun Close']:04d}",
- time_format='%H%M')
+ for key, val in location.items():
+ if not key.endswith('_Hours'):
+ continue
+ day = key[:2].capitalize()
+ if val == '24 hours':
+ open_time = close_time = '12 AM'
+ else:
+ open_time, close_time = val.split(' to ')
+ opening_hours.add_range(day, open_time, close_time, '%I %p')
yield GeojsonPointItem(
- ref=location['Facility ID'],
+ ref=location['Store_ID'],
lon=location['Longitude'],
lat=location['Latitude'],
# name as shown on the Sunoco site
- name=f"Sunoco #{location['Facility ID']}",
- addr_full=location['Address'],
+ name=f"Sunoco #{location['Store_ID']}",
+ addr_full=location['Street_Address'],
city=location['City'],
state=location['State'],
- postcode=location['Zip'],
+ postcode=location['Postalcode'],
country='US',
phone=location['Phone'],
opening_hours=opening_hours.as_opening_hours(),
extras={
'amenity:fuel': True,
- 'atm': int(location['ATM'] or 0) == 1,
- 'car_wash': int(location['Car Wash'] or 0) == 1,
- 'fuel:diesel': int(location['Diesel'] or 0) == 1,
- 'fuel:kerosene': int(location['Kerosene'] or 0) == 1
+ 'atm': location['ATM'] == 'Y',
+ 'car_wash': location['CarWash'],
+ 'fuel:diesel': location['HasDiesel'] == 'Y',
+ 'fuel:kerosene': location['HasKero'] == 'Y'
}
)
| {"golden_diff": "diff --git a/locations/spiders/sunoco.py b/locations/spiders/sunoco.py\n--- a/locations/spiders/sunoco.py\n+++ b/locations/spiders/sunoco.py\n@@ -17,44 +17,34 @@\n for location in json.loads(response.body_as_unicode()):\n opening_hours = OpeningHours()\n \n- for key in [\n- 'Hrs of Operation Mon-Sat Open',\n- 'Hrs of Operation Mon-Sat Close',\n- 'Hrs of Operation Sun Open',\n- 'Hrs of Operation Sun Close'\n- ]:\n- if location[key] >= 2400:\n- location[key] -= 2400\n-\n- for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:\n- opening_hours.add_range(day=day,\n- open_time=f\"{location['Hrs of Operation Mon-Sat Open']:04d}\",\n- close_time=f\"{location['Hrs of Operation Mon-Sat Close']:04d}\",\n- time_format='%H%M')\n-\n- opening_hours.add_range(day='Su',\n- open_time=f\"{location['Hrs of Operation Sun Open']:04d}\",\n- close_time=f\"{location['Hrs of Operation Sun Close']:04d}\",\n- time_format='%H%M')\n+ for key, val in location.items():\n+ if not key.endswith('_Hours'):\n+ continue\n+ day = key[:2].capitalize()\n+ if val == '24 hours':\n+ open_time = close_time = '12 AM'\n+ else:\n+ open_time, close_time = val.split(' to ')\n+ opening_hours.add_range(day, open_time, close_time, '%I %p')\n \n yield GeojsonPointItem(\n- ref=location['Facility ID'],\n+ ref=location['Store_ID'],\n lon=location['Longitude'],\n lat=location['Latitude'],\n # name as shown on the Sunoco site\n- name=f\"Sunoco #{location['Facility ID']}\",\n- addr_full=location['Address'],\n+ name=f\"Sunoco #{location['Store_ID']}\",\n+ addr_full=location['Street_Address'],\n city=location['City'],\n state=location['State'],\n- postcode=location['Zip'],\n+ postcode=location['Postalcode'],\n country='US',\n phone=location['Phone'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n- 'atm': int(location['ATM'] or 0) == 1,\n- 'car_wash': int(location['Car Wash'] or 0) == 1,\n- 'fuel:diesel': int(location['Diesel'] or 0) == 1,\n- 'fuel:kerosene': int(location['Kerosene'] or 0) == 1\n+ 'atm': location['ATM'] == 'Y',\n+ 'car_wash': location['CarWash'],\n+ 'fuel:diesel': location['HasDiesel'] == 'Y',\n+ 'fuel:kerosene': location['HasKero'] == 'Y'\n }\n )\n", "issue": "Spider sunoco is broken\nDuring the global build at 2021-07-21-14-42-39, spider **sunoco** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/sunoco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/sunoco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SunocoSpider(scrapy.Spider):\n name = \"sunoco\"\n item_attributes = {'brand': \"Sunoco\", 'brand_wikidata': \"Q1423218\"}\n allowed_domains = [\"sunoco.com\"]\n\n start_urls = ['https://www.sunoco.com/js/locations.json']\n\n def parse(self, response):\n for location in json.loads(response.body_as_unicode()):\n opening_hours = OpeningHours()\n\n for key in [\n 'Hrs of Operation Mon-Sat Open',\n 'Hrs of Operation Mon-Sat Close',\n 'Hrs of Operation Sun Open',\n 'Hrs of Operation Sun Close'\n ]:\n if location[key] >= 2400:\n location[key] -= 2400\n\n for day in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']:\n opening_hours.add_range(day=day,\n open_time=f\"{location['Hrs of Operation Mon-Sat 
Open']:04d}\",\n close_time=f\"{location['Hrs of Operation Mon-Sat Close']:04d}\",\n time_format='%H%M')\n\n opening_hours.add_range(day='Su',\n open_time=f\"{location['Hrs of Operation Sun Open']:04d}\",\n close_time=f\"{location['Hrs of Operation Sun Close']:04d}\",\n time_format='%H%M')\n\n yield GeojsonPointItem(\n ref=location['Facility ID'],\n lon=location['Longitude'],\n lat=location['Latitude'],\n # name as shown on the Sunoco site\n name=f\"Sunoco #{location['Facility ID']}\",\n addr_full=location['Address'],\n city=location['City'],\n state=location['State'],\n postcode=location['Zip'],\n country='US',\n phone=location['Phone'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n 'atm': int(location['ATM'] or 0) == 1,\n 'car_wash': int(location['Car Wash'] or 0) == 1,\n 'fuel:diesel': int(location['Diesel'] or 0) == 1,\n 'fuel:kerosene': int(location['Kerosene'] or 0) == 1\n }\n )\n", "path": "locations/spiders/sunoco.py"}]} | 1,373 | 708 |