| Column | Type | Details |
|---|---|---|
| problem_id | string | lengths 18 to 22 |
| source | string | 1 distinct value (`rasdani/github-patches`) |
| task_type | string | 1 distinct value (`git_diff`) |
| in_source_id | string | lengths 13 to 58 |
| prompt | string | lengths 1.71k to 9.01k |
| golden_diff | string | lengths 151 to 4.94k |
| verification_info | string | lengths 465 to 11.3k |
| num_tokens_prompt | int64 | 557 to 2.05k |
| num_tokens_diff | int64 | 48 to 1.02k |
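
The snippet below is a minimal sketch of how these records could be loaded and inspected with the Hugging Face `datasets` library. The repo id is copied from the `source` column and the `train` split name is an assumption, not something confirmed by this preview; substitute the actual hub location and split where the dataset is hosted.

```python
import json

from datasets import load_dataset

# Repo id and split are assumptions; the id is taken from the `source`
# column of the records shown below.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens_prompt"])
print(row["prompt"][:200])        # instruction text, issue, and code context
print(row["golden_diff"][:200])   # reference patch that resolves the issue

# verification_info is a JSON string bundling the golden diff, the issue
# text, and the pre-patch file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```

Each record below is one flattened row of this table: the four short metadata fields, then the multi-line `prompt`, `golden_diff`, and `verification_info` cells, followed by the two token counts.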
gh_patches_debug_1294 | rasdani/github-patches | git_diff | Nitrate__Nitrate-564 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Django 2.0
Django 2.0 is not supported and marked as insecure. Refer to https://docs.djangoproject.com/en/2.0/
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'beautifulsoup4 >= 4.1.1',
17 'django >= 2.0,<3.0',
18 'django-contrib-comments == 1.9.1',
19 'django-tinymce == 2.7.0',
20 'django-uuslug == 1.1.8',
21 'html2text',
22 'odfpy >= 0.9.6',
23 'python-bugzilla',
24 'xmltodict',
25 'kobo == 0.9.0'
26 ]
27
28 extras_require = {
29 'mysql': ['mysqlclient >= 1.2.3'],
30 'pgsql': ['psycopg2 == 2.7.5'],
31
32 # Required for tcms.auth.backends.KerberosBackend
33 'krbauth': [
34 'kerberos == 1.2.5'
35 ],
36
37 # Packages for building documentation
38 'docs': [
39 'Sphinx >= 1.1.2',
40 'sphinx_rtd_theme',
41 ],
42
43 # Necessary packages for running tests
44 'tests': [
45 'beautifulsoup4',
46 'coverage',
47 'factory_boy',
48 'flake8',
49 'pytest',
50 'pytest-cov',
51 'pytest-django',
52 ],
53
54 # Contain tools that assists the development
55 'devtools': [
56 'django-debug-toolbar',
57 'tox',
58 'django-extensions',
59 'pygraphviz',
60 ],
61
62 # Required packages required to run async tasks
63 'async': [
64 'celery == 4.2.0',
65 ],
66
67 'multiauth': [
68 'social-auth-app-django == 3.1.0',
69 ]
70 }
71
72 setup(
73 name='nitrate-tcms',
74 version=pkg_version,
75 description='A full-featured Test Case Management System',
76 long_description=get_long_description(),
77 author='Nitrate Team',
78 maintainer='Chenxiong Qi',
79 maintainer_email='[email protected]',
80 url='https://github.com/Nitrate/Nitrate/',
81 license='GPLv2+',
82 keywords='test case',
83 install_requires=install_requires,
84 extras_require=extras_require,
85 python_requires='>=3.6',
86 package_dir={'': 'src'},
87 packages=find_packages('src', exclude=['test*']),
88 include_package_data=True,
89 zip_safe=False,
90 classifiers=[
91 'Framework :: Django',
92 'Framework :: Django :: 2.0',
93 'Framework :: Django :: 2.1',
94 'Framework :: Django :: 2.2',
95 'Intended Audience :: Developers',
96 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
97 'Programming Language :: Python :: 3',
98 'Programming Language :: Python :: 3.6',
99 'Programming Language :: Python :: 3.7',
100 'Programming Language :: Python :: 3 :: Only',
101 'Topic :: Software Development :: Quality Assurance',
102 'Topic :: Software Development :: Testing',
103 ],
104 project_urls={
105 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
106 'Source Code': 'https://github.com/Nitrate/Nitrate',
107 'Documentation': 'https://nitrate.readthedocs.io/',
108 },
109 )
110
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@
install_requires = [
'beautifulsoup4 >= 4.1.1',
- 'django >= 2.0,<3.0',
+ 'django >= 2.1,<3.0',
'django-contrib-comments == 1.9.1',
'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \n install_requires = [\n 'beautifulsoup4 >= 4.1.1',\n- 'django >= 2.0,<3.0',\n+ 'django >= 2.1,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n", "issue": "Remove Django 2.0\nDjango 2.0 is not supported and marked as insecure. Refer to https://docs.djangoproject.com/en/2.0/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]} | 1,568 | 120 |
gh_patches_debug_12358 | rasdani/github-patches | git_diff | fossasia__open-event-server-6307 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong event invoice status fields in db schema
**Describe the bug**
The values allowed for invoices are paid, due &incoming [schema choices]. They are currently equal to those of orders.
</issue>
<code>
[start of app/api/schema/event_invoices.py]
1 from marshmallow import validate as validate
2 from marshmallow_jsonapi import fields
3 from marshmallow_jsonapi.flask import Relationship
4
5 from app.api.helpers.static import PAYMENT_COUNTRIES
6 from app.api.helpers.utilities import dasherize
7 from app.api.schema.base import SoftDeletionSchema
8 from utils.common import use_defaults
9
10
11 @use_defaults()
12 class EventInvoiceSchema(SoftDeletionSchema):
13 """
14 Event Invoice API Schema based on event invoice model
15 """
16 class Meta:
17 type_ = 'event-invoice'
18 self_view = 'v1.event_invoice_detail'
19 self_view_kwargs = {'id': '<id>'}
20 inflect = dasherize
21
22 id = fields.Str(dump_only=True)
23 identifier = fields.Str(allow_none=True)
24 amount = fields.Float(validate=lambda n: n >= 0, allow_none=True)
25 address = fields.Str(allow_none=True)
26 city = fields.Str(allow_none=True)
27 state = fields.Str(allow_none=True)
28 country = fields.Str(validate=validate.OneOf(choices=PAYMENT_COUNTRIES), allow_none=True)
29 zipcode = fields.Str(allow_none=True)
30 created_at = fields.DateTime(allow_none=True)
31 completed_at = fields.DateTime(default=None)
32 transaction_id = fields.Str(allow_none=True)
33 paid_via = fields.Str(validate=validate.OneOf(
34 choices=["free", "stripe", "paypal", "transfer", "onsite", "cheque"]), allow_none=True)
35 payment_mode = fields.Str(allow_none=True)
36 brand = fields.Str(allow_none=True)
37 exp_month = fields.Integer(validate=lambda n: 0 <= n <= 12, allow_none=True)
38 exp_year = fields.Integer(validate=lambda n: n >= 2015, allow_none=True)
39 last4 = fields.Str(allow_none=True)
40 stripe_token = fields.Str(allow_none=True)
41 paypal_token = fields.Str(allow_none=True)
42 status = fields.Str(validate=validate.OneOf(
43 choices=["expired", "deleted", "initialized" "completed", "placed", "pending", "cancelled"]), allow_none=True)
44 invoice_pdf_url = fields.Url(allow_none=True)
45 user = Relationship(attribute='user',
46 self_view='v1.event_invoice_user',
47 self_view_kwargs={'id': '<id>'},
48 related_view='v1.user_detail',
49 related_view_kwargs={'event_invoice_id': '<id>'},
50 schema='UserSchemaPublic',
51 type_='user')
52 order = Relationship(attribute='order',
53 self_view='v1.event_invoice_order',
54 self_view_kwargs={'id': '<id>'},
55 related_view='v1.order_detail',
56 related_view_kwargs={'id': '<id>'},
57 schema='OrderSchema',
58 type_='order')
59 event = Relationship(attribute='event',
60 self_view='v1.event_invoice_event',
61 self_view_kwargs={'id': '<id>'},
62 related_view='v1.event_detail',
63 related_view_kwargs={'event_invoice_id': '<id>'},
64 schema='EventSchemaPublic',
65 type_='event')
66 discount_code = Relationship(attribute='discount_code',
67 self_view='v1.event_invoice_discount_code',
68 self_view_kwargs={'id': '<id>'},
69 related_view='v1.discount_code_detail',
70 related_view_kwargs={'event_invoice_id': '<id>'},
71 schema='DiscountCodeSchemaPublic',
72 type_='discount-code')
73
[end of app/api/schema/event_invoices.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/app/api/schema/event_invoices.py b/app/api/schema/event_invoices.py
--- a/app/api/schema/event_invoices.py
+++ b/app/api/schema/event_invoices.py
@@ -39,8 +39,7 @@
last4 = fields.Str(allow_none=True)
stripe_token = fields.Str(allow_none=True)
paypal_token = fields.Str(allow_none=True)
- status = fields.Str(validate=validate.OneOf(
- choices=["expired", "deleted", "initialized" "completed", "placed", "pending", "cancelled"]), allow_none=True)
+ status = fields.Str(validate=validate.OneOf(choices=["paid", "due"]), allow_none=True)
invoice_pdf_url = fields.Url(allow_none=True)
user = Relationship(attribute='user',
self_view='v1.event_invoice_user',
| {"golden_diff": "diff --git a/app/api/schema/event_invoices.py b/app/api/schema/event_invoices.py\n--- a/app/api/schema/event_invoices.py\n+++ b/app/api/schema/event_invoices.py\n@@ -39,8 +39,7 @@\n last4 = fields.Str(allow_none=True)\n stripe_token = fields.Str(allow_none=True)\n paypal_token = fields.Str(allow_none=True)\n- status = fields.Str(validate=validate.OneOf(\n- choices=[\"expired\", \"deleted\", \"initialized\" \"completed\", \"placed\", \"pending\", \"cancelled\"]), allow_none=True)\n+ status = fields.Str(validate=validate.OneOf(choices=[\"paid\", \"due\"]), allow_none=True)\n invoice_pdf_url = fields.Url(allow_none=True)\n user = Relationship(attribute='user',\n self_view='v1.event_invoice_user',\n", "issue": "Wrong event invoice status fields in db schema\n**Describe the bug**\r\nThe values allowed for invoices are paid, due &incoming [schema choices]. They are currently equal to those of orders.\r\n\r\n\n", "before_files": [{"content": "from marshmallow import validate as validate\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app.api.helpers.static import PAYMENT_COUNTRIES\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass EventInvoiceSchema(SoftDeletionSchema):\n \"\"\"\n Event Invoice API Schema based on event invoice model\n \"\"\"\n class Meta:\n type_ = 'event-invoice'\n self_view = 'v1.event_invoice_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(allow_none=True)\n amount = fields.Float(validate=lambda n: n >= 0, allow_none=True)\n address = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n state = fields.Str(allow_none=True)\n country = fields.Str(validate=validate.OneOf(choices=PAYMENT_COUNTRIES), allow_none=True)\n zipcode = fields.Str(allow_none=True)\n created_at = fields.DateTime(allow_none=True)\n completed_at = fields.DateTime(default=None)\n transaction_id = fields.Str(allow_none=True)\n paid_via = fields.Str(validate=validate.OneOf(\n choices=[\"free\", \"stripe\", \"paypal\", \"transfer\", \"onsite\", \"cheque\"]), allow_none=True)\n payment_mode = fields.Str(allow_none=True)\n brand = fields.Str(allow_none=True)\n exp_month = fields.Integer(validate=lambda n: 0 <= n <= 12, allow_none=True)\n exp_year = fields.Integer(validate=lambda n: n >= 2015, allow_none=True)\n last4 = fields.Str(allow_none=True)\n stripe_token = fields.Str(allow_none=True)\n paypal_token = fields.Str(allow_none=True)\n status = fields.Str(validate=validate.OneOf(\n choices=[\"expired\", \"deleted\", \"initialized\" \"completed\", \"placed\", \"pending\", \"cancelled\"]), allow_none=True)\n invoice_pdf_url = fields.Url(allow_none=True)\n user = Relationship(attribute='user',\n self_view='v1.event_invoice_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'event_invoice_id': '<id>'},\n schema='UserSchemaPublic',\n type_='user')\n order = Relationship(attribute='order',\n self_view='v1.event_invoice_order',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.order_detail',\n related_view_kwargs={'id': '<id>'},\n schema='OrderSchema',\n type_='order')\n event = Relationship(attribute='event',\n self_view='v1.event_invoice_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'event_invoice_id': '<id>'},\n 
schema='EventSchemaPublic',\n type_='event')\n discount_code = Relationship(attribute='discount_code',\n self_view='v1.event_invoice_discount_code',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'event_invoice_id': '<id>'},\n schema='DiscountCodeSchemaPublic',\n type_='discount-code')\n", "path": "app/api/schema/event_invoices.py"}]} | 1,433 | 178 |
gh_patches_debug_24982 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1923 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instructions at https://parl.ai yield: Parse Error: unrecognized arguments: -n 100
From the examples section at https://parl.ai/, when running:
```bash
python examples/display_data.py -t babi:task1k:1,squad -n 100
```
I get the error:
```
Parse Error: unrecognized arguments: -n 100
```
</issue>
<code>
[start of parlai/scripts/display_model.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """Basic example which iterates through the tasks specified and runs the given
7 model on them.
8
9 Examples
10 --------
11
12 .. code-block:: shell
13
14 python examples/display_model.py -t babi:task1k:1 -m "repeat_label"
15 python examples/display_model.py -t "#MovieDD-Reddit" -m "ir_baseline" -mp "-lp 0.5" -dt test
16 """ # noqa: E501
17
18 from parlai.core.params import ParlaiParser
19 from parlai.core.agents import create_agent
20 from parlai.core.worlds import create_task
21
22 import random
23
24
25 def setup_args():
26 parser = ParlaiParser(True, True, 'Display model predictions.')
27 parser.add_argument('-n', '--num-examples', default=10)
28 parser.add_argument('--display-ignore-fields', type=str, default='')
29 # by default we want to display info about the validation set
30 parser.set_defaults(datatype='valid')
31 return parser
32
33
34 def display_model(opt):
35 random.seed(42)
36
37 # Create model and assign it to the specified task
38 agent = create_agent(opt)
39 world = create_task(opt, agent)
40
41 # Show some example dialogs.
42 with world:
43 for _k in range(int(opt['num_examples'])):
44 world.parley()
45 print(world.display() + "\n~~")
46 if world.epoch_done():
47 print("EPOCH DONE")
48 break
49
50
51 if __name__ == '__main__':
52 # Get command line arguments
53 parser = setup_args()
54 opt = parser.parse_args()
55 display_model(opt)
56
[end of parlai/scripts/display_model.py]
[start of parlai/scripts/display_data.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """Basic example which iterates through the tasks specified and prints them out.
7 Used for verification of data loading and iteration.
8
9 For example, to make sure that bAbI task 1 (1k exs) loads one can run and to
10 see a few of them:
11
12 Examples
13 --------
14
15 .. code-block:: shell
16
17 python display_data.py -t babi:task1k:1
18 """
19
20 from parlai.core.params import ParlaiParser
21 from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
22 from parlai.core.worlds import create_task
23
24 import random
25
26
27 def setup_args(parser=None):
28 if parser is None:
29 parser = ParlaiParser(True, True, 'Display data from a task')
30 parser.add_pytorch_datateacher_args()
31 # Get command line arguments
32 parser.add_argument('-ne', '--num_examples', type=int, default=10)
33 parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)
34 parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')
35 parser.set_defaults(datatype='train:stream')
36 return parser
37
38
39 def display_data(opt):
40 # create repeat label agent and assign it to the specified task
41 agent = RepeatLabelAgent(opt)
42 world = create_task(opt, agent)
43
44 # Show some example dialogs.
45 for _ in range(opt['num_examples']):
46 world.parley()
47
48 # NOTE: If you want to look at the data from here rather than calling
49 # world.display() you could access world.acts[0] directly
50 print(world.display() + '\n~~')
51
52 if world.epoch_done():
53 print('EPOCH DONE')
54 break
55
56 try:
57 # print dataset size if available
58 print(
59 '[ loaded {} episodes with a total of {} examples ]'.format(
60 world.num_episodes(), world.num_examples()
61 )
62 )
63 except Exception:
64 pass
65
66
67 if __name__ == '__main__':
68 random.seed(42)
69
70 # Get command line arguments
71 parser = setup_args()
72 opt = parser.parse_args()
73 display_data(opt)
74
[end of parlai/scripts/display_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/parlai/scripts/display_data.py b/parlai/scripts/display_data.py
--- a/parlai/scripts/display_data.py
+++ b/parlai/scripts/display_data.py
@@ -29,9 +29,9 @@
parser = ParlaiParser(True, True, 'Display data from a task')
parser.add_pytorch_datateacher_args()
# Get command line arguments
- parser.add_argument('-ne', '--num_examples', type=int, default=10)
- parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)
- parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')
+ parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
+ parser.add_argument('-mdl', '--max-display-len', type=int, default=1000)
+ parser.add_argument('--display-ignore-fields', type=str, default='agent_reply')
parser.set_defaults(datatype='train:stream')
return parser
diff --git a/parlai/scripts/display_model.py b/parlai/scripts/display_model.py
--- a/parlai/scripts/display_model.py
+++ b/parlai/scripts/display_model.py
@@ -24,7 +24,7 @@
def setup_args():
parser = ParlaiParser(True, True, 'Display model predictions.')
- parser.add_argument('-n', '--num-examples', default=10)
+ parser.add_argument('-n', '-ne', '--num-examples', default=10)
parser.add_argument('--display-ignore-fields', type=str, default='')
# by default we want to display info about the validation set
parser.set_defaults(datatype='valid')
| {"golden_diff": "diff --git a/parlai/scripts/display_data.py b/parlai/scripts/display_data.py\n--- a/parlai/scripts/display_data.py\n+++ b/parlai/scripts/display_data.py\n@@ -29,9 +29,9 @@\n parser = ParlaiParser(True, True, 'Display data from a task')\n parser.add_pytorch_datateacher_args()\n # Get command line arguments\n- parser.add_argument('-ne', '--num_examples', type=int, default=10)\n- parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)\n- parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')\n+ parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)\n+ parser.add_argument('-mdl', '--max-display-len', type=int, default=1000)\n+ parser.add_argument('--display-ignore-fields', type=str, default='agent_reply')\n parser.set_defaults(datatype='train:stream')\n return parser\n \ndiff --git a/parlai/scripts/display_model.py b/parlai/scripts/display_model.py\n--- a/parlai/scripts/display_model.py\n+++ b/parlai/scripts/display_model.py\n@@ -24,7 +24,7 @@\n \n def setup_args():\n parser = ParlaiParser(True, True, 'Display model predictions.')\n- parser.add_argument('-n', '--num-examples', default=10)\n+ parser.add_argument('-n', '-ne', '--num-examples', default=10)\n parser.add_argument('--display-ignore-fields', type=str, default='')\n # by default we want to display info about the validation set\n parser.set_defaults(datatype='valid')\n", "issue": "Instructions at https://parl.ai yield: Parse Error: unrecognized arguments: -n 100\nFrom the examples section at https://parl.ai/, when running:\r\n\r\n```bash\r\npython examples/display_data.py -t babi:task1k:1,squad -n 100\r\n```\r\n\r\nI get the error:\r\n```\r\nParse Error: unrecognized arguments: -n 100\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Basic example which iterates through the tasks specified and runs the given\nmodel on them.\n\nExamples\n--------\n\n.. code-block:: shell\n\n python examples/display_model.py -t babi:task1k:1 -m \"repeat_label\"\n python examples/display_model.py -t \"#MovieDD-Reddit\" -m \"ir_baseline\" -mp \"-lp 0.5\" -dt test\n\"\"\" # noqa: E501\n\nfrom parlai.core.params import ParlaiParser\nfrom parlai.core.agents import create_agent\nfrom parlai.core.worlds import create_task\n\nimport random\n\n\ndef setup_args():\n parser = ParlaiParser(True, True, 'Display model predictions.')\n parser.add_argument('-n', '--num-examples', default=10)\n parser.add_argument('--display-ignore-fields', type=str, default='')\n # by default we want to display info about the validation set\n parser.set_defaults(datatype='valid')\n return parser\n\n\ndef display_model(opt):\n random.seed(42)\n\n # Create model and assign it to the specified task\n agent = create_agent(opt)\n world = create_task(opt, agent)\n\n # Show some example dialogs.\n with world:\n for _k in range(int(opt['num_examples'])):\n world.parley()\n print(world.display() + \"\\n~~\")\n if world.epoch_done():\n print(\"EPOCH DONE\")\n break\n\n\nif __name__ == '__main__':\n # Get command line arguments\n parser = setup_args()\n opt = parser.parse_args()\n display_model(opt)\n", "path": "parlai/scripts/display_model.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Basic example which iterates through the tasks specified and prints them out.\nUsed for verification of data loading and iteration.\n\nFor example, to make sure that bAbI task 1 (1k exs) loads one can run and to\nsee a few of them:\n\nExamples\n--------\n\n.. code-block:: shell\n\n python display_data.py -t babi:task1k:1\n\"\"\"\n\nfrom parlai.core.params import ParlaiParser\nfrom parlai.agents.repeat_label.repeat_label import RepeatLabelAgent\nfrom parlai.core.worlds import create_task\n\nimport random\n\n\ndef setup_args(parser=None):\n if parser is None:\n parser = ParlaiParser(True, True, 'Display data from a task')\n parser.add_pytorch_datateacher_args()\n # Get command line arguments\n parser.add_argument('-ne', '--num_examples', type=int, default=10)\n parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)\n parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')\n parser.set_defaults(datatype='train:stream')\n return parser\n\n\ndef display_data(opt):\n # create repeat label agent and assign it to the specified task\n agent = RepeatLabelAgent(opt)\n world = create_task(opt, agent)\n\n # Show some example dialogs.\n for _ in range(opt['num_examples']):\n world.parley()\n\n # NOTE: If you want to look at the data from here rather than calling\n # world.display() you could access world.acts[0] directly\n print(world.display() + '\\n~~')\n\n if world.epoch_done():\n print('EPOCH DONE')\n break\n\n try:\n # print dataset size if available\n print(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n world.num_episodes(), world.num_examples()\n )\n )\n except Exception:\n pass\n\n\nif __name__ == '__main__':\n random.seed(42)\n\n # Get command line arguments\n parser = setup_args()\n opt = parser.parse_args()\n display_data(opt)\n", "path": "parlai/scripts/display_data.py"}]} | 1,788 | 384 |
gh_patches_debug_8666 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-570 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove unnecessary Flask app
The current auto instrumentation example includes an unnecessary Flask app in its client, remove that.
</issue>
<code>
[start of docs/examples/auto-instrumentation/client.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from sys import argv
16
17 from flask import Flask
18 from requests import get
19
20 from opentelemetry import propagators, trace
21 from opentelemetry.sdk.trace import TracerProvider
22 from opentelemetry.sdk.trace.export import (
23 ConsoleSpanExporter,
24 SimpleExportSpanProcessor,
25 )
26
27 app = Flask(__name__)
28
29 trace.set_tracer_provider(TracerProvider())
30 tracer = trace.get_tracer_provider().get_tracer(__name__)
31
32 trace.get_tracer_provider().add_span_processor(
33 SimpleExportSpanProcessor(ConsoleSpanExporter())
34 )
35
36
37 assert len(argv) == 2
38
39 with tracer.start_as_current_span("client"):
40
41 with tracer.start_as_current_span("client-server"):
42 headers = {}
43 propagators.inject(dict.__setitem__, headers)
44 requested = get(
45 "http://localhost:8082/server_request",
46 params={"param": argv[1]},
47 headers=headers,
48 )
49
50 assert requested.status_code == 200
51
[end of docs/examples/auto-instrumentation/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py
--- a/docs/examples/auto-instrumentation/client.py
+++ b/docs/examples/auto-instrumentation/client.py
@@ -14,7 +14,6 @@
from sys import argv
-from flask import Flask
from requests import get
from opentelemetry import propagators, trace
@@ -24,8 +23,6 @@
SimpleExportSpanProcessor,
)
-app = Flask(__name__)
-
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer_provider().get_tracer(__name__)
| {"golden_diff": "diff --git a/docs/examples/auto-instrumentation/client.py b/docs/examples/auto-instrumentation/client.py\n--- a/docs/examples/auto-instrumentation/client.py\n+++ b/docs/examples/auto-instrumentation/client.py\n@@ -14,7 +14,6 @@\n \n from sys import argv\n \n-from flask import Flask\n from requests import get\n \n from opentelemetry import propagators, trace\n@@ -24,8 +23,6 @@\n SimpleExportSpanProcessor,\n )\n \n-app = Flask(__name__)\n-\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer_provider().get_tracer(__name__)\n", "issue": "Remove unnecessary Flask app\nThe current auto instrumentation example includes an unnecessary Flask app in its client, remove that.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sys import argv\n\nfrom flask import Flask\nfrom requests import get\n\nfrom opentelemetry import propagators, trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\napp = Flask(__name__)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\n\nassert len(argv) == 2\n\nwith tracer.start_as_current_span(\"client\"):\n\n with tracer.start_as_current_span(\"client-server\"):\n headers = {}\n propagators.inject(dict.__setitem__, headers)\n requested = get(\n \"http://localhost:8082/server_request\",\n params={\"param\": argv[1]},\n headers=headers,\n )\n\n assert requested.status_code == 200\n", "path": "docs/examples/auto-instrumentation/client.py"}]} | 990 | 134 |
gh_patches_debug_21070 | rasdani/github-patches | git_diff | Mailu__Mailu-2808 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SCAN_MACROS is incorrect case in Rspamd configs
In Mailu/core/rspamd/conf/external_services.conf, the first line is '{% if SCAN_MACROS == 'True' %}'. It is also the same in external_services_group.conf, note the capital 'T' in 'True'. When the mailu.env is generated, it generates SCAN_MACROS=true, i.e . all lowercase. Thus, in order to enable oletools, one must put SCAN_MACROS=True in environment file. I'm not sure what other items are reliant on SCAN_MACROS, but they should be checked as well.
</issue>
<code>
[start of core/rspamd/start.py]
1 #!/usr/bin/env python3
2
3 import os
4 import glob
5 import logging as log
6 import requests
7 import shutil
8 import sys
9 import time
10 from socrate import system,conf
11
12 system.set_env()
13
14 # Actual startup script
15
16 config_files = []
17 for rspamd_file in glob.glob("/conf/*"):
18 conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
19 config_files.append(os.path.basename(rspamd_file))
20
21 for override_file in glob.glob("/overrides/*"):
22 if os.path.basename(override_file) not in config_files:
23 shutil.copyfile(override_file, os.path.join("/etc/rspamd/local.d", os.path.basename(override_file)))
24
25 # Admin may not be up just yet
26 healthcheck = f'http://{os.environ["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
27 while True:
28 time.sleep(1)
29 try:
30 if requests.get(healthcheck,timeout=2).ok:
31 break
32 except:
33 pass
34 log.warning("Admin is not up just yet, retrying in 1 second")
35
36 # Run rspamd
37 os.system("mkdir -m 755 -p /run/rspamd")
38 os.system("chown rspamd:rspamd /run/rspamd")
39 os.system("find /var/lib/rspamd | grep -v /filter | xargs -n1 chown rspamd:rspamd")
40 os.execv("/usr/sbin/rspamd", ["rspamd", "-f", "-u", "rspamd", "-g", "rspamd"])
41
[end of core/rspamd/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -9,13 +9,13 @@
import time
from socrate import system,conf
-system.set_env()
+env = system.set_env()
# Actual startup script
config_files = []
for rspamd_file in glob.glob("/conf/*"):
- conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
+ conf.jinja(rspamd_file, env, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
config_files.append(os.path.basename(rspamd_file))
for override_file in glob.glob("/overrides/*"):
@@ -23,7 +23,7 @@
shutil.copyfile(override_file, os.path.join("/etc/rspamd/local.d", os.path.basename(override_file)))
# Admin may not be up just yet
-healthcheck = f'http://{os.environ["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
+healthcheck = f'http://{env["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
while True:
time.sleep(1)
try:
| {"golden_diff": "diff --git a/core/rspamd/start.py b/core/rspamd/start.py\n--- a/core/rspamd/start.py\n+++ b/core/rspamd/start.py\n@@ -9,13 +9,13 @@\n import time\n from socrate import system,conf\n \n-system.set_env()\n+env = system.set_env()\n \n # Actual startup script\n \n config_files = []\n for rspamd_file in glob.glob(\"/conf/*\"):\n- conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n+ conf.jinja(rspamd_file, env, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n config_files.append(os.path.basename(rspamd_file))\n \n for override_file in glob.glob(\"/overrides/*\"):\n@@ -23,7 +23,7 @@\n shutil.copyfile(override_file, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(override_file)))\n \n # Admin may not be up just yet\n-healthcheck = f'http://{os.environ[\"ADMIN_ADDRESS\"]}/internal/rspamd/local_domains'\n+healthcheck = f'http://{env[\"ADMIN_ADDRESS\"]}/internal/rspamd/local_domains'\n while True:\n time.sleep(1)\n try:\n", "issue": "SCAN_MACROS is incorrect case in Rspamd configs\nIn Mailu/core/rspamd/conf/external_services.conf, the first line is '{% if SCAN_MACROS == 'True' %}'. It is also the same in external_services_group.conf, note the capital 'T' in 'True'. When the mailu.env is generated, it generates SCAN_MACROS=true, i.e . all lowercase. Thus, in order to enable oletools, one must put SCAN_MACROS=True in environment file. I'm not sure what other items are reliant on SCAN_MACROS, but they should be checked as well. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport logging as log\nimport requests\nimport shutil\nimport sys\nimport time\nfrom socrate import system,conf\n\nsystem.set_env()\n\n# Actual startup script\n\nconfig_files = []\nfor rspamd_file in glob.glob(\"/conf/*\"):\n conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n config_files.append(os.path.basename(rspamd_file))\n\nfor override_file in glob.glob(\"/overrides/*\"):\n if os.path.basename(override_file) not in config_files:\n shutil.copyfile(override_file, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(override_file)))\n\n# Admin may not be up just yet\nhealthcheck = f'http://{os.environ[\"ADMIN_ADDRESS\"]}/internal/rspamd/local_domains'\nwhile True:\n time.sleep(1)\n try:\n if requests.get(healthcheck,timeout=2).ok:\n break\n except:\n pass\n log.warning(\"Admin is not up just yet, retrying in 1 second\")\n\n# Run rspamd\nos.system(\"mkdir -m 755 -p /run/rspamd\")\nos.system(\"chown rspamd:rspamd /run/rspamd\")\nos.system(\"find /var/lib/rspamd | grep -v /filter | xargs -n1 chown rspamd:rspamd\")\nos.execv(\"/usr/sbin/rspamd\", [\"rspamd\", \"-f\", \"-u\", \"rspamd\", \"-g\", \"rspamd\"])\n", "path": "core/rspamd/start.py"}]} | 1,083 | 279 |
gh_patches_debug_9817 | rasdani/github-patches | git_diff | bridgecrewio__checkov-211 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add new check: IAM Policies should not be attached to a user
Attach policies only to groups or roles
https://www.terraform.io/docs/providers/aws/r/iam_user_policy.html
https://www.terraform.io/docs/providers/aws/r/iam_user_policy_attachment.html
users: https://www.terraform.io/docs/providers/aws/r/iam_policy_attachment.html
Policies / ManagedPolicyArns: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html
Users: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-policy.html
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class IAMPolicyAttachedToGroupOrRoles(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure IAM policies are attached only to groups or roles (Reducing access management complexity may " \
8 "in-turn reduce opportunity for a principal to inadvertently receive or retain excessive privileges.) "
9 id = "CKV_AWS_40"
10 supported_resources = ['aws_iam_user_policy_attachment', 'aws_iam_user_policy', 'aws_iam_policy_attachment']
11 categories = [CheckCategories.IAM]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if 'user' in conf.keys() or 'users' in conf.keys():
16 return CheckResult.FAILED
17 return CheckResult.PASSED
18
19 check = IAMPolicyAttachedToGroupOrRoles()
20
[end of checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py b/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py
--- a/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py
+++ b/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py
@@ -12,8 +12,9 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
- if 'user' in conf.keys() or 'users' in conf.keys():
+ if 'user' in conf.keys() or ('users' in conf.keys() and len(conf['users'][0]) > 0):
return CheckResult.FAILED
return CheckResult.PASSED
+
check = IAMPolicyAttachedToGroupOrRoles()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py b/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py\n--- a/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py\n+++ b/checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py\n@@ -12,8 +12,9 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n- if 'user' in conf.keys() or 'users' in conf.keys():\n+ if 'user' in conf.keys() or ('users' in conf.keys() and len(conf['users'][0]) > 0):\n return CheckResult.FAILED\n return CheckResult.PASSED\n \n+\n check = IAMPolicyAttachedToGroupOrRoles()\n", "issue": "Add new check: IAM Policies should not be attached to a user\nAttach policies only to groups or roles \r\n\r\nhttps://www.terraform.io/docs/providers/aws/r/iam_user_policy.html\r\nhttps://www.terraform.io/docs/providers/aws/r/iam_user_policy_attachment.html\r\nusers: https://www.terraform.io/docs/providers/aws/r/iam_policy_attachment.html\r\n\r\nPolicies / ManagedPolicyArns: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html\r\nUsers: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-policy.html\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass IAMPolicyAttachedToGroupOrRoles(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure IAM policies are attached only to groups or roles (Reducing access management complexity may \" \\\n \"in-turn reduce opportunity for a principal to inadvertently receive or retain excessive privileges.) \"\n id = \"CKV_AWS_40\"\n supported_resources = ['aws_iam_user_policy_attachment', 'aws_iam_user_policy', 'aws_iam_policy_attachment']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'user' in conf.keys() or 'users' in conf.keys():\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = IAMPolicyAttachedToGroupOrRoles()\n", "path": "checkov/terraform/checks/resource/aws/IAMPolicyAttachedToGroupOrRoles.py"}]} | 929 | 199 |
gh_patches_debug_24218 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1042 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change path of storage of Evaluation Scripts
## Current behaviour
Currently, the evaluation scripts are stored in the `submission_files` directory which should not happen ideally.
## Solution
Modify the `RandomFile()` method such that the evaluation scripts should be uploaded in the `evaluation_scripts` directory instead of the `submission_files` directory.
</issue>
<code>
[start of apps/base/utils.py]
1 import os
2 import uuid
3
4 from django.conf import settings
5 from django.utils.deconstruct import deconstructible
6
7 from rest_framework.exceptions import NotFound
8 from rest_framework.pagination import PageNumberPagination
9
10
11 class StandardResultSetPagination(PageNumberPagination):
12 page_size = 100
13 page_size_query_param = 'page_size'
14 max_page_size = 1000
15
16
17 def paginated_queryset(queryset, request, pagination_class=PageNumberPagination()):
18 '''
19 Return a paginated result for a queryset
20 '''
21 paginator = pagination_class
22 paginator.page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
23 result_page = paginator.paginate_queryset(queryset, request)
24 return (paginator, result_page)
25
26
27 @deconstructible
28 class RandomFileName(object):
29 def __init__(self, path):
30 self.path = path
31
32 def __call__(self, instance, filename):
33 extension = os.path.splitext(filename)[1]
34 if 'id' in self.path and instance.pk:
35 self.path = self.path.format(id=instance.pk)
36 elif 'id' not in self.path and instance.pk:
37 path = "submission_files/submission_{id}"
38 self.path = path.format(id=instance.pk)
39 filename = '{}{}'.format(uuid.uuid4(), extension)
40 filename = os.path.join(self.path, filename)
41 return filename
42
43
44 def get_model_object(model_name):
45 def get_model_by_pk(pk):
46 try:
47 model_object = model_name.objects.get(pk=pk)
48 return model_object
49 except model_name.DoesNotExist:
50 raise NotFound('{} {} does not exist'.format(model_name.__name__, pk))
51 get_model_by_pk.__name__ = 'get_{}_object'.format(model_name.__name__.lower())
52 return get_model_by_pk
53
[end of apps/base/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/apps/base/utils.py b/apps/base/utils.py
--- a/apps/base/utils.py
+++ b/apps/base/utils.py
@@ -2,7 +2,6 @@
import uuid
from django.conf import settings
-from django.utils.deconstruct import deconstructible
from rest_framework.exceptions import NotFound
from rest_framework.pagination import PageNumberPagination
@@ -24,20 +23,17 @@
return (paginator, result_page)
-@deconstructible
class RandomFileName(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
extension = os.path.splitext(filename)[1]
+ path = self.path
if 'id' in self.path and instance.pk:
- self.path = self.path.format(id=instance.pk)
- elif 'id' not in self.path and instance.pk:
- path = "submission_files/submission_{id}"
- self.path = path.format(id=instance.pk)
+ path = self.path.format(id=instance.pk)
filename = '{}{}'.format(uuid.uuid4(), extension)
- filename = os.path.join(self.path, filename)
+ filename = os.path.join(path, filename)
return filename
| {"golden_diff": "diff --git a/apps/base/utils.py b/apps/base/utils.py\n--- a/apps/base/utils.py\n+++ b/apps/base/utils.py\n@@ -2,7 +2,6 @@\n import uuid\n \n from django.conf import settings\n-from django.utils.deconstruct import deconstructible\n \n from rest_framework.exceptions import NotFound\n from rest_framework.pagination import PageNumberPagination\n@@ -24,20 +23,17 @@\n return (paginator, result_page)\n \n \n-@deconstructible\n class RandomFileName(object):\n def __init__(self, path):\n self.path = path\n \n def __call__(self, instance, filename):\n extension = os.path.splitext(filename)[1]\n+ path = self.path\n if 'id' in self.path and instance.pk:\n- self.path = self.path.format(id=instance.pk)\n- elif 'id' not in self.path and instance.pk:\n- path = \"submission_files/submission_{id}\"\n- self.path = path.format(id=instance.pk)\n+ path = self.path.format(id=instance.pk)\n filename = '{}{}'.format(uuid.uuid4(), extension)\n- filename = os.path.join(self.path, filename)\n+ filename = os.path.join(path, filename)\n return filename\n", "issue": "Change path of storage of Evaluation Scripts\n## Current behaviour\r\n\r\nCurrently, the evaluation scripts are stored in the `submission_files` directory which should not happen ideally.\r\n\r\n## Solution\r\nModify the `RandomFile()` method such that the evaluation scripts should be uploaded in the `evaluation_scripts` directory instead of the `submission_files` directory. \r\n\n", "before_files": [{"content": "import os\nimport uuid\n\nfrom django.conf import settings\nfrom django.utils.deconstruct import deconstructible\n\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.pagination import PageNumberPagination\n\n\nclass StandardResultSetPagination(PageNumberPagination):\n page_size = 100\n page_size_query_param = 'page_size'\n max_page_size = 1000\n\n\ndef paginated_queryset(queryset, request, pagination_class=PageNumberPagination()):\n '''\n Return a paginated result for a queryset\n '''\n paginator = pagination_class\n paginator.page_size = settings.REST_FRAMEWORK['PAGE_SIZE']\n result_page = paginator.paginate_queryset(queryset, request)\n return (paginator, result_page)\n\n\n@deconstructible\nclass RandomFileName(object):\n def __init__(self, path):\n self.path = path\n\n def __call__(self, instance, filename):\n extension = os.path.splitext(filename)[1]\n if 'id' in self.path and instance.pk:\n self.path = self.path.format(id=instance.pk)\n elif 'id' not in self.path and instance.pk:\n path = \"submission_files/submission_{id}\"\n self.path = path.format(id=instance.pk)\n filename = '{}{}'.format(uuid.uuid4(), extension)\n filename = os.path.join(self.path, filename)\n return filename\n\n\ndef get_model_object(model_name):\n def get_model_by_pk(pk):\n try:\n model_object = model_name.objects.get(pk=pk)\n return model_object\n except model_name.DoesNotExist:\n raise NotFound('{} {} does not exist'.format(model_name.__name__, pk))\n get_model_by_pk.__name__ = 'get_{}_object'.format(model_name.__name__.lower())\n return get_model_by_pk\n", "path": "apps/base/utils.py"}]} | 1,077 | 265 |
gh_patches_debug_2828 | rasdani/github-patches | git_diff | svthalia__concrexit-3188 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-closable site announcements can secretly be closed if you send the right cookie
### Describe the bug
Non-closable site announcements can secretly be closed if you send the right cookie
### How to reproduce
Steps to reproduce the behaviour:
1. Have an announcement that is closable
2. Close it on your machine
3. Change the announcement to be non-closable
4. It still isn't there because you already closed it.
The `closable` field just controls whether the close button appears or not, but it doesn't influence the actual logic
### Expected behaviour
Always show non-closable announcements
### Screenshots
### Additional context
</issue>
<code>
[start of website/announcements/context_processors.py]
1 """These context processors can be used to expand the context provided to admin views."""
2 from .models import Announcement
3
4
5 def announcements(request):
6 """Get a list of announcements.
7
8 Filters out announcements that have been closed already.
9
10 :param request: the request object
11 :return: a dict containing the list announcements
12 :rtype: dict
13 """
14 closed_announcements = request.session.get("closed_announcements", [])
15 announcements_list = [
16 a
17 for a in Announcement.objects.all()
18 if a.is_visible and a.pk not in closed_announcements
19 ]
20
21 # Announcements set by AnnouncementMiddleware.
22 persistent_announcements = getattr(request, "_announcements", [])
23 return {
24 "announcements": announcements_list,
25 "persistent_announcements": persistent_announcements,
26 }
27
[end of website/announcements/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/announcements/context_processors.py b/website/announcements/context_processors.py
--- a/website/announcements/context_processors.py
+++ b/website/announcements/context_processors.py
@@ -15,7 +15,7 @@
announcements_list = [
a
for a in Announcement.objects.all()
- if a.is_visible and a.pk not in closed_announcements
+ if a.is_visible and (not a.closeable or a.pk not in closed_announcements)
]
# Announcements set by AnnouncementMiddleware.
| {"golden_diff": "diff --git a/website/announcements/context_processors.py b/website/announcements/context_processors.py\n--- a/website/announcements/context_processors.py\n+++ b/website/announcements/context_processors.py\n@@ -15,7 +15,7 @@\n announcements_list = [\n a\n for a in Announcement.objects.all()\n- if a.is_visible and a.pk not in closed_announcements\n+ if a.is_visible and (not a.closeable or a.pk not in closed_announcements)\n ]\n \n # Announcements set by AnnouncementMiddleware.\n", "issue": "Non-closable site announcements can secretly be closed if you send the right cookie\n### Describe the bug\r\nNon-closable site announcements can secretly be closed if you send the right cookie\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Have an announcement that is closable\r\n2. Close it on your machine\r\n3. Change the announcement to be non-closable\r\n4. It still isn't there because you already closed it.\r\n\r\nThe `closable` field just controls whether the close button appears or not, but it doesn't influence the actual logic\r\n\r\n### Expected behaviour\r\nAlways show non-closable announcements\r\n\r\n### Screenshots\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "\"\"\"These context processors can be used to expand the context provided to admin views.\"\"\"\nfrom .models import Announcement\n\n\ndef announcements(request):\n \"\"\"Get a list of announcements.\n\n Filters out announcements that have been closed already.\n\n :param request: the request object\n :return: a dict containing the list announcements\n :rtype: dict\n \"\"\"\n closed_announcements = request.session.get(\"closed_announcements\", [])\n announcements_list = [\n a\n for a in Announcement.objects.all()\n if a.is_visible and a.pk not in closed_announcements\n ]\n\n # Announcements set by AnnouncementMiddleware.\n persistent_announcements = getattr(request, \"_announcements\", [])\n return {\n \"announcements\": announcements_list,\n \"persistent_announcements\": persistent_announcements,\n }\n", "path": "website/announcements/context_processors.py"}]} | 893 | 121 |
gh_patches_debug_25342 | rasdani/github-patches | git_diff | getredash__redash-4239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change PagerDuty's default summary text
Currently PagerDuty's Alert destination default summary text uses the query id and name. We should change it to use the alert name, as it usually explains better what the alert is about.
While #4153 implements the ability to customize the summary text, it's good to have a saner default regardless.
(If #4153 is not merged before implementing, should be implemented based on its branch)
</issue>
<code>
[start of redash/destinations/pagerduty.py]
1 import logging
2 from redash.destinations import *
3
4 enabled = True
5
6 try:
7 import pypd
8 except ImportError:
9 enabled = False
10
11
12 class PagerDuty(BaseDestination):
13
14 KEY_STRING = '{alert_id}_{query_id}'
15 DESCRIPTION_STR = u'Alert - Redash Query #{query_id}: {query_name}'
16
17 @classmethod
18 def enabled(cls):
19 return enabled
20
21 @classmethod
22 def configuration_schema(cls):
23 return {
24 'type': 'object',
25 'properties': {
26 'integration_key': {
27 'type': 'string',
28 'title': 'PagerDuty Service Integration Key'
29 },
30 'description': {
31 'type': 'string',
32 'title': 'Description for the event, defaults to query',
33 }
34 },
35 "required": ["integration_key"]
36 }
37
38 @classmethod
39 def icon(cls):
40 return 'creative-commons-pd-alt'
41
42 def notify(self, alert, query, user, new_state, app, host, options):
43
44 if alert.custom_subject:
45 default_desc = alert.custom_subject
46 elif options.get('description'):
47 default_desc = options.get('description')
48 else:
49 default_desc = self.DESCRIPTION_STR.format(query_id=query.id, query_name=query.name)
50
51 incident_key = self.KEY_STRING.format(alert_id=alert.id, query_id=query.id)
52 data = {
53 'routing_key': options.get('integration_key'),
54 'incident_key': incident_key,
55 'dedup_key': incident_key,
56 'payload': {
57 'summary': default_desc,
58 'severity': 'error',
59 'source': 'redash',
60 }
61 }
62
63 if alert.custom_body:
64 data['payload']['custom_details'] = alert.custom_body
65
66 if new_state == 'triggered':
67 data['event_action'] = 'trigger'
68 elif new_state == "unknown":
69 logging.info('Unknown state, doing nothing')
70 return
71 else:
72 data['event_action'] = 'resolve'
73
74 try:
75
76 ev = pypd.EventV2.create(data=data)
77 logging.warning(ev)
78
79 except Exception:
80 logging.exception("PagerDuty trigger failed!")
81
82
83 register(PagerDuty)
84
[end of redash/destinations/pagerduty.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/destinations/pagerduty.py b/redash/destinations/pagerduty.py
--- a/redash/destinations/pagerduty.py
+++ b/redash/destinations/pagerduty.py
@@ -12,7 +12,7 @@
class PagerDuty(BaseDestination):
KEY_STRING = '{alert_id}_{query_id}'
- DESCRIPTION_STR = u'Alert - Redash Query #{query_id}: {query_name}'
+ DESCRIPTION_STR = u'Alert: {alert_name}'
@classmethod
def enabled(cls):
@@ -29,7 +29,7 @@
},
'description': {
'type': 'string',
- 'title': 'Description for the event, defaults to query',
+ 'title': 'Description for the event, defaults to alert name',
}
},
"required": ["integration_key"]
@@ -46,7 +46,7 @@
elif options.get('description'):
default_desc = options.get('description')
else:
- default_desc = self.DESCRIPTION_STR.format(query_id=query.id, query_name=query.name)
+ default_desc = self.DESCRIPTION_STR.format(alert_name=alert.name)
incident_key = self.KEY_STRING.format(alert_id=alert.id, query_id=query.id)
data = {
| {"golden_diff": "diff --git a/redash/destinations/pagerduty.py b/redash/destinations/pagerduty.py\n--- a/redash/destinations/pagerduty.py\n+++ b/redash/destinations/pagerduty.py\n@@ -12,7 +12,7 @@\n class PagerDuty(BaseDestination):\n \n KEY_STRING = '{alert_id}_{query_id}'\n- DESCRIPTION_STR = u'Alert - Redash Query #{query_id}: {query_name}'\n+ DESCRIPTION_STR = u'Alert: {alert_name}'\n \n @classmethod\n def enabled(cls):\n@@ -29,7 +29,7 @@\n },\n 'description': {\n 'type': 'string',\n- 'title': 'Description for the event, defaults to query',\n+ 'title': 'Description for the event, defaults to alert name',\n }\n },\n \"required\": [\"integration_key\"]\n@@ -46,7 +46,7 @@\n elif options.get('description'):\n default_desc = options.get('description')\n else:\n- default_desc = self.DESCRIPTION_STR.format(query_id=query.id, query_name=query.name)\n+ default_desc = self.DESCRIPTION_STR.format(alert_name=alert.name)\n \n incident_key = self.KEY_STRING.format(alert_id=alert.id, query_id=query.id)\n data = {\n", "issue": "Change PagerDuty's default summary text\nCurrently PagerDuty's Alert destination default summary text uses the query id and name. We should change it to use the alert name as it's usually better explains what the alert is.\r\n\r\nWhile #4153 implements ability to customize the summary text, it's good to have a saner default regardless.\r\n\r\n(If #4153 is not merged before implementing, should be implemented based on its branch)\n", "before_files": [{"content": "import logging\nfrom redash.destinations import *\n\nenabled = True\n\ntry:\n import pypd\nexcept ImportError:\n enabled = False\n\n\nclass PagerDuty(BaseDestination):\n\n KEY_STRING = '{alert_id}_{query_id}'\n DESCRIPTION_STR = u'Alert - Redash Query #{query_id}: {query_name}'\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'integration_key': {\n 'type': 'string',\n 'title': 'PagerDuty Service Integration Key'\n },\n 'description': {\n 'type': 'string',\n 'title': 'Description for the event, defaults to query',\n }\n },\n \"required\": [\"integration_key\"]\n }\n\n @classmethod\n def icon(cls):\n return 'creative-commons-pd-alt'\n\n def notify(self, alert, query, user, new_state, app, host, options):\n\n if alert.custom_subject:\n default_desc = alert.custom_subject\n elif options.get('description'):\n default_desc = options.get('description')\n else:\n default_desc = self.DESCRIPTION_STR.format(query_id=query.id, query_name=query.name)\n\n incident_key = self.KEY_STRING.format(alert_id=alert.id, query_id=query.id)\n data = {\n 'routing_key': options.get('integration_key'),\n 'incident_key': incident_key,\n 'dedup_key': incident_key,\n 'payload': {\n 'summary': default_desc,\n 'severity': 'error',\n 'source': 'redash',\n }\n }\n\n if alert.custom_body:\n data['payload']['custom_details'] = alert.custom_body\n\n if new_state == 'triggered':\n data['event_action'] = 'trigger'\n elif new_state == \"unknown\":\n logging.info('Unknown state, doing nothing')\n return\n else:\n data['event_action'] = 'resolve'\n\n try:\n\n ev = pypd.EventV2.create(data=data)\n logging.warning(ev)\n\n except Exception:\n logging.exception(\"PagerDuty trigger failed!\")\n\n\nregister(PagerDuty)\n", "path": "redash/destinations/pagerduty.py"}]} | 1,273 | 287 |
gh_patches_debug_18680 | rasdani/github-patches | git_diff | wagtail__wagtail-118 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'AnonymousUser' object has no attribute 'has_usable_password'
Visiting http://localhost:8000/admin/account/ as a not-logged-in user gives the AttributeError above...
Traceback:
``` Environment:
Request Method: GET
Request URL: http://localhost:8000/admin/account/
Django Version: 1.6.2
Python Version: 2.7.6
Installed Applications:
('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'compressor',
'taggit',
'modelcluster',
'django.contrib.admin',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects')
Installed Middleware:
('django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware')
Traceback:
File "C:\Users\drager\developing\django-env\lib\site-packages\django\core\handlers\base.py" in get_response
114. response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "c:\users\drager\developing\django-env\src\wagtail\wagtail\wagtailadmin\views\account.py" in account
9. 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),
File "C:\Users\drager\developing\django-env\lib\site-packages\django\utils\functional.py" in inner
214. return func(self._wrapped, *args)
Exception Type: AttributeError at /admin/account/
Exception Value: 'AnonymousUser' object has no attribute 'has_usable_password'
```
</issue>
<code>
[start of wagtail/wagtailadmin/views/account.py]
1 from django.conf import settings
2 from django.shortcuts import render, redirect
3 from django.contrib import messages
4 from django.contrib.auth.forms import SetPasswordForm
5 from django.utils.translation import ugettext as _
6
7 def account(request):
8 return render(request, 'wagtailadmin/account/account.html', {
9 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),
10 })
11
12
13 def change_password(request):
14 can_change_password = request.user.has_usable_password()
15
16 if can_change_password:
17 if request.POST:
18 form = SetPasswordForm(request.user, request.POST)
19
20 if form.is_valid():
21 form.save()
22
23 messages.success(request, _("Your password has been changed successfully!"))
24 return redirect('wagtailadmin_account')
25 else:
26 form = SetPasswordForm(request.user)
27 else:
28 form = None
29
30 return render(request, 'wagtailadmin/account/change_password.html', {
31 'form': form,
32 'can_change_password': can_change_password,
33 })
34
[end of wagtail/wagtailadmin/views/account.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py
--- a/wagtail/wagtailadmin/views/account.py
+++ b/wagtail/wagtailadmin/views/account.py
@@ -2,14 +2,17 @@
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import SetPasswordForm
+from django.contrib.auth.decorators import permission_required
from django.utils.translation import ugettext as _
+@permission_required('wagtailadmin.access_admin')
def account(request):
return render(request, 'wagtailadmin/account/account.html', {
'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),
})
+@permission_required('wagtailadmin.access_admin')
def change_password(request):
can_change_password = request.user.has_usable_password()
| {"golden_diff": "diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py\n--- a/wagtail/wagtailadmin/views/account.py\n+++ b/wagtail/wagtailadmin/views/account.py\n@@ -2,14 +2,17 @@\n from django.shortcuts import render, redirect\n from django.contrib import messages\n from django.contrib.auth.forms import SetPasswordForm\n+from django.contrib.auth.decorators import permission_required\n from django.utils.translation import ugettext as _ \n \n+@permission_required('wagtailadmin.access_admin')\n def account(request):\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n })\n \n \n+@permission_required('wagtailadmin.access_admin')\n def change_password(request):\n can_change_password = request.user.has_usable_password()\n", "issue": "'AnonymousUser' object has no attribute 'has_usable_password'\nVisiting http://localhost:8000/admin/account/ as not logged in user gives the AttributeError above...\n\nTraceback;\n\n``` Environment:\n\n\nRequest Method: GET\nRequest URL: http://localhost:8000/admin/account/\n\nDjango Version: 1.6.2\nPython Version: 2.7.6\nInstalled Applications:\n('django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'south',\n 'compressor',\n 'taggit',\n 'modelcluster',\n 'django.contrib.admin',\n 'wagtail.wagtailcore',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailredirects')\nInstalled Middleware:\n('django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware')\n\n\nTraceback:\nFile \"C:\\Users\\drager\\developing\\django-env\\lib\\site-packages\\django\\core\\handlers\\base.py\" in get_response\n 114. response = wrapped_callback(request, *callback_args, **callback_kwargs)\nFile \"c:\\users\\drager\\developing\\django-env\\src\\wagtail\\wagtail\\wagtailadmin\\views\\account.py\" in account\n 9. 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\nFile \"C:\\Users\\drager\\developing\\django-env\\lib\\site-packages\\django\\utils\\functional.py\" in inner\n 214. 
return func(self._wrapped, *args)\n\nException Type: AttributeError at /admin/account/\nException Value: 'AnonymousUser' object has no attribute 'has_usable_password'\n```\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.utils.translation import ugettext as _ \n\ndef account(request):\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n })\n\n\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n", "path": "wagtail/wagtailadmin/views/account.py"}]} | 1,324 | 200 |
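After the wagtail patch above, both account views are wrapped in Django's `permission_required` decorator, so anonymous visitors are redirected to the login page before `request.user.has_usable_password()` is ever evaluated. A sketch of the patched `account` view (imports as in the original file):

```python
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render


@permission_required('wagtailadmin.access_admin')
def account(request):
    # By the time this runs the user is authenticated (anonymous users get
    # redirected to the login view by the decorator), so calling
    # has_usable_password() can no longer raise AttributeError.
    return render(request, 'wagtailadmin/account/account.html', {
        'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)
        and request.user.has_usable_password(),
    })
```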
gh_patches_debug_29670 | rasdani/github-patches | git_diff | streamlink__streamlink-3457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
https://www.micous.com/ Site closing webcasts
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace the space character between the square brackets with an x in order to check the boxes -->
- [ ] This is a plugin issue and I have read the contribution guidelines.
- [ ] I am using the latest development version from the master branch.
### Description
I was checking the m3u8 links for Tango Live so that it would benefit people and maybe we could add plugins, but now I have checked and the mico.us plugin no longer works because micous.com itself is no longer working. It looks like the site has turned off broadcasting and playback; I guess broadcasts can only be followed through the application now, so this needs to be checked.
<!-- Explain the plugin issue as thoroughly as you can. -->
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
1. ...https://www.micous.com/
2. ...
3. ...
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
Don't post screenshots of the log output and instead copy the text from your terminal application.
-->
```
REPLACE THIS TEXT WITH THE LOG OUTPUT
```
### Additional comments, etc.
[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)
</issue>
<code>
[start of src/streamlink/plugins/mico.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import validate
6 from streamlink.stream import HLSStream
7 from streamlink.utils import parse_json
8 from streamlink.utils.url import update_scheme
9
10 log = logging.getLogger(__name__)
11
12
13 class Mico(Plugin):
14 author = None
15 category = None
16 title = None
17
18 url_re = re.compile(r'https?://(?:www\.)?micous\.com/live/\d+')
19 json_data_re = re.compile(r'win._profile\s*=\s*({.*})')
20
21 _json_data_schema = validate.Schema(
22 validate.transform(json_data_re.search),
23 validate.any(None, validate.all(
24 validate.get(1),
25 validate.transform(parse_json),
26 validate.any(None, validate.all({
27 'mico_id': int,
28 'nickname': validate.text,
29 'h5_url': validate.all(
30 validate.transform(lambda x: update_scheme('http:', x)),
31 validate.url(),
32 ),
33 'is_live': bool,
34 })),
35 )),
36 )
37
38 @classmethod
39 def can_handle_url(cls, url):
40 return cls.url_re.match(url) is not None
41
42 def get_author(self):
43 if self.author is not None:
44 return self.author
45
46 def get_category(self):
47 if self.category is not None:
48 return self.category
49
50 def get_title(self):
51 if self.title is not None:
52 return self.title
53
54 def _get_streams(self):
55 json_data = self.session.http.get(self.url, schema=self._json_data_schema)
56
57 if not json_data:
58 log.error('Failed to get JSON data')
59 return
60
61 if not json_data['is_live']:
62 log.info('This stream is no longer online')
63 return
64
65 self.author = json_data['mico_id']
66 self.category = 'Live'
67 self.title = json_data['nickname']
68
69 return HLSStream.parse_variant_playlist(self.session, json_data['h5_url'])
70
71
72 __plugin__ = Mico
73
[end of src/streamlink/plugins/mico.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/mico.py b/src/streamlink/plugins/mico.py
deleted file mode 100644
--- a/src/streamlink/plugins/mico.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import logging
-import re
-
-from streamlink.plugin import Plugin
-from streamlink.plugin.api import validate
-from streamlink.stream import HLSStream
-from streamlink.utils import parse_json
-from streamlink.utils.url import update_scheme
-
-log = logging.getLogger(__name__)
-
-
-class Mico(Plugin):
- author = None
- category = None
- title = None
-
- url_re = re.compile(r'https?://(?:www\.)?micous\.com/live/\d+')
- json_data_re = re.compile(r'win._profile\s*=\s*({.*})')
-
- _json_data_schema = validate.Schema(
- validate.transform(json_data_re.search),
- validate.any(None, validate.all(
- validate.get(1),
- validate.transform(parse_json),
- validate.any(None, validate.all({
- 'mico_id': int,
- 'nickname': validate.text,
- 'h5_url': validate.all(
- validate.transform(lambda x: update_scheme('http:', x)),
- validate.url(),
- ),
- 'is_live': bool,
- })),
- )),
- )
-
- @classmethod
- def can_handle_url(cls, url):
- return cls.url_re.match(url) is not None
-
- def get_author(self):
- if self.author is not None:
- return self.author
-
- def get_category(self):
- if self.category is not None:
- return self.category
-
- def get_title(self):
- if self.title is not None:
- return self.title
-
- def _get_streams(self):
- json_data = self.session.http.get(self.url, schema=self._json_data_schema)
-
- if not json_data:
- log.error('Failed to get JSON data')
- return
-
- if not json_data['is_live']:
- log.info('This stream is no longer online')
- return
-
- self.author = json_data['mico_id']
- self.category = 'Live'
- self.title = json_data['nickname']
-
- return HLSStream.parse_variant_playlist(self.session, json_data['h5_url'])
-
-
-__plugin__ = Mico
| {"golden_diff": "diff --git a/src/streamlink/plugins/mico.py b/src/streamlink/plugins/mico.py\ndeleted file mode 100644\n--- a/src/streamlink/plugins/mico.py\n+++ /dev/null\n@@ -1,72 +0,0 @@\n-import logging\n-import re\n-\n-from streamlink.plugin import Plugin\n-from streamlink.plugin.api import validate\n-from streamlink.stream import HLSStream\n-from streamlink.utils import parse_json\n-from streamlink.utils.url import update_scheme\n-\n-log = logging.getLogger(__name__)\n-\n-\n-class Mico(Plugin):\n- author = None\n- category = None\n- title = None\n-\n- url_re = re.compile(r'https?://(?:www\\.)?micous\\.com/live/\\d+')\n- json_data_re = re.compile(r'win._profile\\s*=\\s*({.*})')\n-\n- _json_data_schema = validate.Schema(\n- validate.transform(json_data_re.search),\n- validate.any(None, validate.all(\n- validate.get(1),\n- validate.transform(parse_json),\n- validate.any(None, validate.all({\n- 'mico_id': int,\n- 'nickname': validate.text,\n- 'h5_url': validate.all(\n- validate.transform(lambda x: update_scheme('http:', x)),\n- validate.url(),\n- ),\n- 'is_live': bool,\n- })),\n- )),\n- )\n-\n- @classmethod\n- def can_handle_url(cls, url):\n- return cls.url_re.match(url) is not None\n-\n- def get_author(self):\n- if self.author is not None:\n- return self.author\n-\n- def get_category(self):\n- if self.category is not None:\n- return self.category\n-\n- def get_title(self):\n- if self.title is not None:\n- return self.title\n-\n- def _get_streams(self):\n- json_data = self.session.http.get(self.url, schema=self._json_data_schema)\n-\n- if not json_data:\n- log.error('Failed to get JSON data')\n- return\n-\n- if not json_data['is_live']:\n- log.info('This stream is no longer online')\n- return\n-\n- self.author = json_data['mico_id']\n- self.category = 'Live'\n- self.title = json_data['nickname']\n-\n- return HLSStream.parse_variant_playlist(self.session, json_data['h5_url'])\n-\n-\n-__plugin__ = Mico\n", "issue": "https://www.micous.com/ Site closing webcasts\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace the space character between the square brackets with an x in order to check the boxes -->\r\n- [ ] This is a plugin issue and I have read the contribution guidelines.\r\n- [ ] I am using the latest development version from the master branch.\r\n\r\n\r\n### Description\r\nI was checking the m3u8 codes for Tango Live so that it would benefit people and maybe we can bring plugins but now I have checked and the mico.us plugin does not work because mico.us is no longer working. It looks like it has turned off broadcasting and playback, I guess only broadcasts can be followed through the application and need to be checked. \r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\nthis? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. ...https://www.micous.com/\r\n2. ...\r\n3. 
...\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n\r\nDon't post screenshots of the log output and instead copy the text from your terminal application.\r\n-->\r\n\r\n```\r\nREPLACE THIS TEXT WITH THE LOG OUTPUT\r\n```\r\n\r\n\r\n### Additional comments, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass Mico(Plugin):\n author = None\n category = None\n title = None\n\n url_re = re.compile(r'https?://(?:www\\.)?micous\\.com/live/\\d+')\n json_data_re = re.compile(r'win._profile\\s*=\\s*({.*})')\n\n _json_data_schema = validate.Schema(\n validate.transform(json_data_re.search),\n validate.any(None, validate.all(\n validate.get(1),\n validate.transform(parse_json),\n validate.any(None, validate.all({\n 'mico_id': int,\n 'nickname': validate.text,\n 'h5_url': validate.all(\n validate.transform(lambda x: update_scheme('http:', x)),\n validate.url(),\n ),\n 'is_live': bool,\n })),\n )),\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_author(self):\n if self.author is not None:\n return self.author\n\n def get_category(self):\n if self.category is not None:\n return self.category\n\n def get_title(self):\n if self.title is not None:\n return self.title\n\n def _get_streams(self):\n json_data = self.session.http.get(self.url, schema=self._json_data_schema)\n\n if not json_data:\n log.error('Failed to get JSON data')\n return\n\n if not json_data['is_live']:\n log.info('This stream is no longer online')\n return\n\n self.author = json_data['mico_id']\n self.category = 'Live'\n self.title = json_data['nickname']\n\n return HLSStream.parse_variant_playlist(self.session, json_data['h5_url'])\n\n\n__plugin__ = Mico\n", "path": "src/streamlink/plugins/mico.py"}]} | 1,573 | 541 |
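The streamlink fix above simply deletes the plugin, so there is nothing left to patch in place; once the file is gone, `micous.com` URLs no longer match any plugin and fall through to Streamlink's generic "no plugin can handle URL" handling. For reference, the URL pattern that disappears with the removed module:

```python
import re

# Pattern the removed Mico plugin used for can_handle_url().
url_re = re.compile(r'https?://(?:www\.)?micous\.com/live/\d+')

print(bool(url_re.match('https://www.micous.com/live/12345')))  # True (previously handled)
print(bool(url_re.match('https://www.micous.com/')))            # False (never handled)
```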
gh_patches_debug_30756 | rasdani/github-patches | git_diff | apache__airflow-19592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cast to string in ds macro functions
As already written in issue https://github.com/apache/airflow/issues/19241, the strptime function requires a string but gets a proxy if the variables ds/next_ds (whose types changed in version 2.2.0) are passed in.
This change will make the functions `ds_add` and `ds_format` backward compatible.
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
next_ds changed to proxy and it cannot be used in ds_add macro function
### Apache Airflow version
2.2.0 (latest released)
### Operating System
Ubuntu
### Versions of Apache Airflow Providers
_No response_
### Deployment
Docker-Compose
### Deployment details
_No response_
### What happened
Tried to use this code:
`some_variable='{{macros.ds_format(macros.ds_add(next_ds, '
'(ti.start_date - ti.execution_date).days), '
'"%Y-%m-%d", "%Y-%m-%d 21:00:00")}}')`
but got this error:
`strptime() argument 1 must be str, not Proxy`
because the `next_ds` variable changed to proxy.
### What you expected to happen
_No response_
### How to reproduce
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/macros/__init__.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 import time # noqa
19 import uuid # noqa
20 from datetime import datetime, timedelta
21 from random import random # noqa
22
23 import dateutil # noqa
24
25 from airflow.macros import hive # noqa
26
27
28 def ds_add(ds, days):
29 """
30 Add or subtract days from a YYYY-MM-DD
31
32 :param ds: anchor date in ``YYYY-MM-DD`` format to add to
33 :type ds: str
34 :param days: number of days to add to the ds, you can use negative values
35 :type days: int
36
37 >>> ds_add('2015-01-01', 5)
38 '2015-01-06'
39 >>> ds_add('2015-01-06', -5)
40 '2015-01-01'
41 """
42 ds = datetime.strptime(ds, '%Y-%m-%d')
43 if days:
44 ds = ds + timedelta(days)
45 return ds.isoformat()[:10]
46
47
48 def ds_format(ds, input_format, output_format):
49 """
50 Takes an input string and outputs another string
51 as specified in the output format
52
53 :param ds: input string which contains a date
54 :type ds: str
55 :param input_format: input string format. E.g. %Y-%m-%d
56 :type input_format: str
57 :param output_format: output string format E.g. %Y-%m-%d
58 :type output_format: str
59
60 >>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y")
61 '01-01-15'
62 >>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d")
63 '2015-01-05'
64 """
65 return datetime.strptime(ds, input_format).strftime(output_format)
66
67
68 def datetime_diff_for_humans(dt, since=None):
69 """
70 Return a human-readable/approximate difference between two datetimes, or
71 one and now.
72
73 :param dt: The datetime to display the diff for
74 :type dt: datetime.datetime
75 :param since: When to display the date from. If ``None`` then the diff is
76 between ``dt`` and now.
77 :type since: None or datetime.datetime
78 :rtype: str
79 """
80 import pendulum
81
82 return pendulum.instance(dt).diff_for_humans(since)
83
[end of airflow/macros/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/macros/__init__.py b/airflow/macros/__init__.py
--- a/airflow/macros/__init__.py
+++ b/airflow/macros/__init__.py
@@ -19,13 +19,17 @@
import uuid # noqa
from datetime import datetime, timedelta
from random import random # noqa
+from typing import Any, Optional, Union
import dateutil # noqa
+import lazy_object_proxy
from airflow.macros import hive # noqa
+TemplateStringInput = Union[str, lazy_object_proxy.Proxy]
-def ds_add(ds, days):
+
+def ds_add(ds: TemplateStringInput, days: int) -> str:
"""
Add or subtract days from a YYYY-MM-DD
@@ -39,13 +43,13 @@
>>> ds_add('2015-01-06', -5)
'2015-01-01'
"""
- ds = datetime.strptime(ds, '%Y-%m-%d')
- if days:
- ds = ds + timedelta(days)
- return ds.isoformat()[:10]
+ if not days:
+ return str(ds)
+ dt = datetime.strptime(str(ds), "%Y-%m-%d") + timedelta(days=days)
+ return dt.strftime("%Y-%m-%d")
-def ds_format(ds, input_format, output_format):
+def ds_format(ds: TemplateStringInput, input_format: str, output_format: str) -> str:
"""
Takes an input string and outputs another string
as specified in the output format
@@ -62,10 +66,10 @@
>>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05'
"""
- return datetime.strptime(ds, input_format).strftime(output_format)
+ return datetime.strptime(str(ds), input_format).strftime(output_format)
-def datetime_diff_for_humans(dt, since=None):
+def datetime_diff_for_humans(dt: Any, since: Optional[datetime] = None) -> str:
"""
Return a human-readable/approximate difference between two datetimes, or
one and now.
| {"golden_diff": "diff --git a/airflow/macros/__init__.py b/airflow/macros/__init__.py\n--- a/airflow/macros/__init__.py\n+++ b/airflow/macros/__init__.py\n@@ -19,13 +19,17 @@\n import uuid # noqa\n from datetime import datetime, timedelta\n from random import random # noqa\n+from typing import Any, Optional, Union\n \n import dateutil # noqa\n+import lazy_object_proxy\n \n from airflow.macros import hive # noqa\n \n+TemplateStringInput = Union[str, lazy_object_proxy.Proxy]\n \n-def ds_add(ds, days):\n+\n+def ds_add(ds: TemplateStringInput, days: int) -> str:\n \"\"\"\n Add or subtract days from a YYYY-MM-DD\n \n@@ -39,13 +43,13 @@\n >>> ds_add('2015-01-06', -5)\n '2015-01-01'\n \"\"\"\n- ds = datetime.strptime(ds, '%Y-%m-%d')\n- if days:\n- ds = ds + timedelta(days)\n- return ds.isoformat()[:10]\n+ if not days:\n+ return str(ds)\n+ dt = datetime.strptime(str(ds), \"%Y-%m-%d\") + timedelta(days=days)\n+ return dt.strftime(\"%Y-%m-%d\")\n \n \n-def ds_format(ds, input_format, output_format):\n+def ds_format(ds: TemplateStringInput, input_format: str, output_format: str) -> str:\n \"\"\"\n Takes an input string and outputs another string\n as specified in the output format\n@@ -62,10 +66,10 @@\n >>> ds_format('1/5/2015', \"%m/%d/%Y\", \"%Y-%m-%d\")\n '2015-01-05'\n \"\"\"\n- return datetime.strptime(ds, input_format).strftime(output_format)\n+ return datetime.strptime(str(ds), input_format).strftime(output_format)\n \n \n-def datetime_diff_for_humans(dt, since=None):\n+def datetime_diff_for_humans(dt: Any, since: Optional[datetime] = None) -> str:\n \"\"\"\n Return a human-readable/approximate difference between two datetimes, or\n one and now.\n", "issue": "Cast to string in ds macro functions\n\r\nAs already written in this issue https://github.com/apache/airflow/issues/19241 strptime function required string, but got proxy if the variables ds/next_ds (the types of these variables changed on version 2.2.0) sent.\r\nThis change will make the function `ds_add` and `ds_format` backward compatible.\r\n\r\n<!--\r\nThank you for contributing! Please make sure that your code changes\r\nare covered with tests. 
And in case of new features or big changes\r\nremember to adjust the documentation.\r\n\r\nFeel free to ping committers for the review!\r\n\r\nIn case of existing issue, reference it using one of the following:\r\n\r\ncloses: #ISSUE\r\nrelated: #ISSUE\r\n\r\nHow to write a good git commit message:\r\nhttp://chris.beams.io/posts/git-commit/\r\n-->\r\n\r\n---\r\n**^ Add meaningful description above**\r\n\r\nRead the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.\r\nIn case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.\r\nIn case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).\r\nIn case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).\r\n\nnext_ds changed to proxy and it cannot be used in ds_add macro function\n### Apache Airflow version\n\n2.2.0 (latest released)\n\n### Operating System\n\nUbuntu\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nTried to use this this code:\r\n`some_variable='{{macros.ds_format(macros.ds_add(next_ds, '\r\n '(ti.start_date - ti.execution_date).days), '\r\n '\"%Y-%m-%d\", \"%Y-%m-%d 21:00:00\")}}')`\r\nbut got this error:\r\n`strptime() argument 1 must be str, not Proxy`\r\nbecause the `next_ds` variable changed to proxy.\n\n### What you expected to happen\n\n_No response_\n\n### How to reproduce\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport time # noqa\nimport uuid # noqa\nfrom datetime import datetime, timedelta\nfrom random import random # noqa\n\nimport dateutil # noqa\n\nfrom airflow.macros import hive # noqa\n\n\ndef ds_add(ds, days):\n \"\"\"\n Add or subtract days from a YYYY-MM-DD\n\n :param ds: anchor date in ``YYYY-MM-DD`` format to add to\n :type ds: str\n :param days: number of days to add to the ds, you can use negative values\n :type days: int\n\n >>> ds_add('2015-01-01', 5)\n '2015-01-06'\n >>> ds_add('2015-01-06', -5)\n '2015-01-01'\n \"\"\"\n ds = datetime.strptime(ds, '%Y-%m-%d')\n if days:\n ds = ds + timedelta(days)\n return ds.isoformat()[:10]\n\n\ndef ds_format(ds, input_format, output_format):\n \"\"\"\n Takes an input string and outputs another string\n as specified in the output format\n\n :param ds: input string which contains a date\n :type ds: str\n :param input_format: input string format. E.g. %Y-%m-%d\n :type input_format: str\n :param output_format: output string format E.g. %Y-%m-%d\n :type output_format: str\n\n >>> ds_format('2015-01-01', \"%Y-%m-%d\", \"%m-%d-%y\")\n '01-01-15'\n >>> ds_format('1/5/2015', \"%m/%d/%Y\", \"%Y-%m-%d\")\n '2015-01-05'\n \"\"\"\n return datetime.strptime(ds, input_format).strftime(output_format)\n\n\ndef datetime_diff_for_humans(dt, since=None):\n \"\"\"\n Return a human-readable/approximate difference between two datetimes, or\n one and now.\n\n :param dt: The datetime to display the diff for\n :type dt: datetime.datetime\n :param since: When to display the date from. If ``None`` then the diff is\n between ``dt`` and now.\n :type since: None or datetime.datetime\n :rtype: str\n \"\"\"\n import pendulum\n\n return pendulum.instance(dt).diff_for_humans(since)\n", "path": "airflow/macros/__init__.py"}]} | 2,005 | 505 |
gh_patches_debug_22849 | rasdani/github-patches | git_diff | uclapi__uclapi-51 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error responses missing OK property and correct HTTP Status Code
Hello there,
when making an invalid request, say, an invalid token, an error is given but the HTTP code is still `200 OK` when it should give an authentication error (a `400 Bad Request` or even a `401 Unauthorized`).
Request:
```http
GET /roombookings/rooms?token=uclapi-5d58c3c4e6bf9c-c2910ad3b6e054-7ef60f44f1c14f-a05147bfd17fdb HTTP/1.1
Host: uclapi.com
User-Agent: Paw/3.0.16 (Macintosh; OS X/10.12.4) NSURLConnection/1349.63
Cookie: AWSALB=8q9+FZmk9TOAZ/GG2tFsVUuckVO8STONoiGDn6/jd9FBEwFi5Ke/kvz+hIjdCmAwtpOIXGlnudL7LU3AaVxKt1sHWYGjLJnoMypHqu53r7Ub4b73trfiMx5NMVZ6
```
Response:
```http
HTTP/1.1 200 OK
Connection: keep-alive
Content-Type: application/json
Allow: GET, OPTIONS
Server: nginx/1.11.10
Set-Cookie: AWSALB=U2qTk7k+LrfxXibfskgPN1RoWvDQqKhaSBmgvUMOa3AcYgag1BZMcyz+5h5rQ2qhQc+Cm2PYzfjKV466PHcc9dleZHkLYE5O8d5q2WO+7WVbQT6VQMSSSHpOh3xy; Expires=Sat, 11 Mar 2017 14:18:40 GMT; Path=/
Transfer-Encoding: Identity
Date: Sat, 04 Mar 2017 14:18:40 GMT
X-Frame-Options: SAMEORIGIN
Vary: Accept, Cookie
{"error": "Token does not exist"}
```
Furthermore, when a request is successful, the first property is `"ok": true`; however, with invalid requests, the `"ok": false` property is missing from the error response.
Actual:
```json
{
"error": "Token does not exist"
}
```
Expected:
```json
{
"ok": false,
"error": "Token does not exist"
}
```
</issue>
<code>
[start of backend/uclapi/roombookings/token_auth.py]
1 from rest_framework.response import Response
2 from dashboard.models import App
3 from django.core.exceptions import ObjectDoesNotExist
4 from django.http import JsonResponse
5
6
7 def does_token_exist(view_func):
8 def wrapped(request, *args, **kwargs):
9 token = request.GET.get("token")
10
11 if not token:
12 return JsonResponse({
13 "error": "No token provided"
14 })
15
16 try:
17 App.objects.get(api_token=token)
18 except ObjectDoesNotExist:
19 return JsonResponse({
20 "error": "Token does not exist"
21 })
22
23 return view_func(request, *args, **kwargs)
24 return wrapped
25
[end of backend/uclapi/roombookings/token_auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/roombookings/token_auth.py b/backend/uclapi/roombookings/token_auth.py
--- a/backend/uclapi/roombookings/token_auth.py
+++ b/backend/uclapi/roombookings/token_auth.py
@@ -1,4 +1,3 @@
-from rest_framework.response import Response
from dashboard.models import App
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
@@ -9,16 +8,22 @@
token = request.GET.get("token")
if not token:
- return JsonResponse({
+ response = JsonResponse({
+ "ok": False,
"error": "No token provided"
})
+ response.status_code = 400
+ return response
try:
App.objects.get(api_token=token)
except ObjectDoesNotExist:
- return JsonResponse({
+ response = JsonResponse({
+ "ok": False,
"error": "Token does not exist"
})
+ response.status_code = 400
+ return response
return view_func(request, *args, **kwargs)
return wrapped
| {"golden_diff": "diff --git a/backend/uclapi/roombookings/token_auth.py b/backend/uclapi/roombookings/token_auth.py\n--- a/backend/uclapi/roombookings/token_auth.py\n+++ b/backend/uclapi/roombookings/token_auth.py\n@@ -1,4 +1,3 @@\n-from rest_framework.response import Response\n from dashboard.models import App\n from django.core.exceptions import ObjectDoesNotExist\n from django.http import JsonResponse\n@@ -9,16 +8,22 @@\n token = request.GET.get(\"token\")\n \n if not token:\n- return JsonResponse({\n+ response = JsonResponse({\n+ \"ok\": False,\n \"error\": \"No token provided\"\n })\n+ response.status_code = 400\n+ return response\n \n try:\n App.objects.get(api_token=token)\n except ObjectDoesNotExist:\n- return JsonResponse({\n+ response = JsonResponse({\n+ \"ok\": False,\n \"error\": \"Token does not exist\"\n })\n+ response.status_code = 400\n+ return response\n \n return view_func(request, *args, **kwargs)\n return wrapped\n", "issue": "Error responses missing OK property and correct HTTP Status Code\nHello there,\r\nwhen making an invalid request, say, an invalid token, an error is given but the HTTP code is still `200 OK` when it should give an authentication error (a `400 Bad Request` or even a `401 Unauthorized`).\r\n\r\nRequest:\r\n```http\r\nGET /roombookings/rooms?token=uclapi-5d58c3c4e6bf9c-c2910ad3b6e054-7ef60f44f1c14f-a05147bfd17fdb HTTP/1.1\r\nHost: uclapi.com\r\nUser-Agent: Paw/3.0.16 (Macintosh; OS X/10.12.4) NSURLConnection/1349.63\r\nCookie: AWSALB=8q9+FZmk9TOAZ/GG2tFsVUuckVO8STONoiGDn6/jd9FBEwFi5Ke/kvz+hIjdCmAwtpOIXGlnudL7LU3AaVxKt1sHWYGjLJnoMypHqu53r7Ub4b73trfiMx5NMVZ6\r\n```\r\n\r\nResponse:\r\n```http\r\nHTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Type: application/json\r\nAllow: GET, OPTIONS\r\nServer: nginx/1.11.10\r\nSet-Cookie: AWSALB=U2qTk7k+LrfxXibfskgPN1RoWvDQqKhaSBmgvUMOa3AcYgag1BZMcyz+5h5rQ2qhQc+Cm2PYzfjKV466PHcc9dleZHkLYE5O8d5q2WO+7WVbQT6VQMSSSHpOh3xy; Expires=Sat, 11 Mar 2017 14:18:40 GMT; Path=/\r\nTransfer-Encoding: Identity\r\nDate: Sat, 04 Mar 2017 14:18:40 GMT\r\nX-Frame-Options: SAMEORIGIN\r\nVary: Accept, Cookie\r\n\r\n{\"error\": \"Token does not exist\"}\r\n```\r\n\r\nFurthermore, when a request is successful, the first property is an `\"ok\": true`, however, with invalid requests, the `\"ok\": false` is missing from the error response.\r\n\r\nActual:\r\n```json\r\n{\r\n \"error\": \"Token does not exist\"\r\n}\r\n```\r\n\r\nExpected:\r\n```json\r\n{\r\n \"ok\": false,\r\n \"error\": \"Token does not exist\"\r\n}\r\n```\n", "before_files": [{"content": "from rest_framework.response import Response\nfrom dashboard.models import App\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse\n\n\ndef does_token_exist(view_func):\n def wrapped(request, *args, **kwargs):\n token = request.GET.get(\"token\")\n\n if not token:\n return JsonResponse({\n \"error\": \"No token provided\"\n })\n\n try:\n App.objects.get(api_token=token)\n except ObjectDoesNotExist:\n return JsonResponse({\n \"error\": \"Token does not exist\"\n })\n\n return view_func(request, *args, **kwargs)\n return wrapped\n", "path": "backend/uclapi/roombookings/token_auth.py"}]} | 1,317 | 247 |
gh_patches_debug_26093 | rasdani/github-patches | git_diff | lightly-ai__lightly-618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update docs version
Update docs version and link copyright in docs footer to lightly website
Closes #618
</issue>
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import sys
15 sys.path.insert(0, os.path.abspath('../..'))
16
17 import sphinx_rtd_theme
18
19
20 # -- Project information -----------------------------------------------------
21
22 project = 'lightly'
23 copyright = '2020, Lightly AG'
24 author = 'Philipp Wirth, Igor Susmelj'
25
26 # The full version, including alpha/beta/rc tags
27 release = '1.0.0'
28 master_doc = 'index'
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx_rtd_theme",
38 #'sphinx.ext.napoleon',
39 "sphinx.ext.autosummary",
40 "sphinx_gallery.gen_gallery",
41 "sphinx_tabs.tabs",
42 "sphinx_copybutton",
43 ]
44
45 sphinx_gallery_conf = {
46 'examples_dirs': ['tutorials_source/package', 'tutorials_source/platform'],
47 'gallery_dirs': ['tutorials/package', 'tutorials/platform'], # path to where to save gallery generated output
48 'filename_pattern': '/tutorial_',
49 }
50
51 napoleon_google_docstring = True
52 napoleon_numpy_docstring = False
53 napoleon_include_init_with_doc = False
54 napoleon_include_private_with_doc = False
55 napoleon_include_special_with_doc = False
56 napoleon_use_admonition_for_examples = False
57 napoleon_use_admonition_for_notes = False
58 napoleon_use_admonition_for_references = False
59 napoleon_use_ivar = False
60 napoleon_use_param = False
61 napoleon_use_rtype = False
62 napoleon_type_aliases = None
63
64 # Add any paths that contain templates here, relative to this directory.
65 templates_path = ['_templates']
66
67 # List of patterns, relative to source directory, that match files and
68 # directories to ignore when looking for source files.
69 # This pattern also affects html_static_path and html_extra_path.
70 exclude_patterns = []
71
72
73 # -- Options for HTML output -------------------------------------------------
74
75 # The theme to use for HTML and HTML Help pages. See the documentation for
76 # a list of builtin themes.
77 #
78 html_theme = 'sphinx_rtd_theme'
79
80 html_theme_options = {
81 'collapse_navigation': False, # set to false to prevent menu item collapse
82 }
83
84 # Add any paths that contain custom static files (such as style sheets) here,
85 # relative to this directory. They are copied after the builtin static files,
86 # so a file named "default.css" will overwrite the builtin "default.css".
87 html_static_path = ['_static']
88
89 html_favicon = 'favicon.png'
90
91 #html_logo = "../logos/lightly_logo_crop.png"
92 def setup(app):
93 app.add_css_file('css/my-styles.css')
94
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -15,16 +15,19 @@
sys.path.insert(0, os.path.abspath('../..'))
import sphinx_rtd_theme
+import lightly
# -- Project information -----------------------------------------------------
project = 'lightly'
-copyright = '2020, Lightly AG'
+copyright_year = '2020'
+copyright = "Lightly AG"
+website_url = 'https://www.lightly.ai/'
author = 'Philipp Wirth, Igor Susmelj'
# The full version, including alpha/beta/rc tags
-release = '1.0.0'
+release = lightly.__version__
master_doc = 'index'
@@ -79,6 +82,7 @@
html_theme_options = {
'collapse_navigation': False, # set to false to prevent menu item collapse
+ 'logo_only': True
}
# Add any paths that contain custom static files (such as style sheets) here,
@@ -88,6 +92,10 @@
html_favicon = 'favicon.png'
-#html_logo = "../logos/lightly_logo_crop.png"
-def setup(app):
- app.add_css_file('css/my-styles.css')
+html_logo = '../logos/lightly_logo_crop_white_text.png'
+
+# Exposes variables so that they can be used by django
+html_context = {
+ 'copyright_year': copyright_year,
+ 'website_url': website_url,
+}
\ No newline at end of file
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -15,16 +15,19 @@\n sys.path.insert(0, os.path.abspath('../..'))\n \n import sphinx_rtd_theme\n+import lightly\n \n \n # -- Project information -----------------------------------------------------\n \n project = 'lightly'\n-copyright = '2020, Lightly AG'\n+copyright_year = '2020'\n+copyright = \"Lightly AG\"\n+website_url = 'https://www.lightly.ai/'\n author = 'Philipp Wirth, Igor Susmelj'\n \n # The full version, including alpha/beta/rc tags\n-release = '1.0.0'\n+release = lightly.__version__\n master_doc = 'index'\n \n \n@@ -79,6 +82,7 @@\n \n html_theme_options = {\n 'collapse_navigation': False, # set to false to prevent menu item collapse\n+ 'logo_only': True\n }\n \n # Add any paths that contain custom static files (such as style sheets) here,\n@@ -88,6 +92,10 @@\n \n html_favicon = 'favicon.png'\n \n-#html_logo = \"../logos/lightly_logo_crop.png\"\n-def setup(app):\n- app.add_css_file('css/my-styles.css')\n+html_logo = '../logos/lightly_logo_crop_white_text.png'\n+\n+#\u00a0Exposes variables so that they can be used by django\n+html_context = {\n+ 'copyright_year': copyright_year,\n+ 'website_url': website_url,\n+}\n\\ No newline at end of file\n", "issue": "Update docs version\nUpdate docs version and link copyright in docs footer to lightly website\r\n\r\nCloses #618 \n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../..'))\n\nimport sphinx_rtd_theme\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'lightly'\ncopyright = '2020, Lightly AG'\nauthor = 'Philipp Wirth, Igor Susmelj'\n\n# The full version, including alpha/beta/rc tags\nrelease = '1.0.0'\nmaster_doc = 'index'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx_rtd_theme\",\n #'sphinx.ext.napoleon',\n \"sphinx.ext.autosummary\",\n \"sphinx_gallery.gen_gallery\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n]\n\nsphinx_gallery_conf = {\n 'examples_dirs': ['tutorials_source/package', 'tutorials_source/platform'],\n 'gallery_dirs': ['tutorials/package', 'tutorials/platform'], # path to where to save gallery generated output\n 'filename_pattern': '/tutorial_',\n}\n\nnapoleon_google_docstring = True\nnapoleon_numpy_docstring = False\nnapoleon_include_init_with_doc = False\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = False\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = False\nnapoleon_use_admonition_for_references = False\nnapoleon_use_ivar = False\nnapoleon_use_param = False\nnapoleon_use_rtype = False\nnapoleon_type_aliases = None\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_theme_options = {\n 'collapse_navigation': False, # set to false to prevent menu item collapse\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_favicon = 'favicon.png'\n\n#html_logo = \"../logos/lightly_logo_crop.png\"\ndef setup(app):\n app.add_css_file('css/my-styles.css')\n", "path": "docs/source/conf.py"}]} | 1,437 | 351 |
gh_patches_debug_16649 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate functions?
Central point to discuss functions to deprecate, if any?
- [x] `process_text` - `transform_columns` covers this very well
- [x] `impute` vs `fill_empty` - `impute` has the advantage of extra statistics functions (mean, mode, ...)
- [x] `rename_columns` - use pandas `rename`
- [x] `rename_column` - use `pd.rename`
- [x] `remove_columns` - use `pd.drop` or `select`
- [x] `filter_on` - use `query` or `select`
- [x] `fill_direction` - use `transform_columns` or `pd.DataFrame.assign`
- [x] `groupby_agg` - use `transform_columns` - once `by` is implemented
- [x] `then` - use `pd.DataFrame.pipe`
- [x] `to_datetime` - use `jn.transform_columns`
- [x] `pivot_wider` - use `pd.DataFrame.pivot`
</issue>
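For the `then` entry in the list above, the suggested replacement is essentially a drop-in change; a small sketch using plain pandas (no pyjanitor-specific behaviour assumed beyond what the docstring in the code below shows):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [7, 8, 9]})

# janitor's `then` simply calls func(df), so the stock pandas method chain is equivalent:
doubled = df.pipe(lambda d: d * 2)   # same result as df.then(lambda d: d * 2)
```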
<code>
[start of janitor/functions/then.py]
1 """Implementation source for `then`."""
2 from typing import Callable
3 import pandas_flavor as pf
4 import pandas as pd
5
6
7 @pf.register_dataframe_method
8 def then(df: pd.DataFrame, func: Callable) -> pd.DataFrame:
9 """Add an arbitrary function to run in the `pyjanitor` method chain.
10
11 This method does not mutate the original DataFrame.
12
13 Examples:
14 A trivial example using a lambda `func`.
15
16 >>> import pandas as pd
17 >>> import janitor
18 >>> (pd.DataFrame({"a": [1, 2, 3], "b": [7, 8, 9]})
19 ... .then(lambda df: df * 2))
20 a b
21 0 2 14
22 1 4 16
23 2 6 18
24
25 Args:
26 df: A pandas DataFrame.
27 func: A function you would like to run in the method chain.
28 It should take one parameter and return one parameter, each being
29 the DataFrame object. After that, do whatever you want in the
30 middle. Go crazy.
31
32 Returns:
33 A pandas DataFrame.
34 """
35 df = func(df)
36 return df
37
[end of janitor/functions/then.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/functions/then.py b/janitor/functions/then.py
--- a/janitor/functions/then.py
+++ b/janitor/functions/then.py
@@ -2,14 +2,25 @@
from typing import Callable
import pandas_flavor as pf
import pandas as pd
+from janitor.utils import refactored_function
[email protected]_dataframe_method
+@refactored_function(
+ message="This function will be deprecated in a 1.x release. "
+ "Kindly use `pd.DataFrame.pipe` instead."
+)
@pf.register_dataframe_method
def then(df: pd.DataFrame, func: Callable) -> pd.DataFrame:
"""Add an arbitrary function to run in the `pyjanitor` method chain.
This method does not mutate the original DataFrame.
+ !!!note
+
+ This function will be deprecated in a 1.x release.
+ Please use `pd.DataFrame.pipe` instead.
+
Examples:
A trivial example using a lambda `func`.
| {"golden_diff": "diff --git a/janitor/functions/then.py b/janitor/functions/then.py\n--- a/janitor/functions/then.py\n+++ b/janitor/functions/then.py\n@@ -2,14 +2,25 @@\n from typing import Callable\n import pandas_flavor as pf\n import pandas as pd\n+from janitor.utils import refactored_function\n \n \[email protected]_dataframe_method\n+@refactored_function(\n+ message=\"This function will be deprecated in a 1.x release. \"\n+ \"Kindly use `pd.DataFrame.pipe` instead.\"\n+)\n @pf.register_dataframe_method\n def then(df: pd.DataFrame, func: Callable) -> pd.DataFrame:\n \"\"\"Add an arbitrary function to run in the `pyjanitor` method chain.\n \n This method does not mutate the original DataFrame.\n \n+ !!!note\n+\n+ This function will be deprecated in a 1.x release.\n+ Please use `pd.DataFrame.pipe` instead.\n+\n Examples:\n A trivial example using a lambda `func`.\n", "issue": "Deprecate functions ?\nCentral point to discuss functions to deprecate, if any?\r\n\r\n- [x] `process_text` - `transform_columns` covers this very well\r\n- [x] `impute` vs `fill_empty` - `impute` has the advantage of extra statistics functions (mean, mode, ...)\r\n- [x] `rename_columns` - use pandas `rename`\r\n- [x] `rename_column` - use `pd.rename`\r\n- [x] `remove_columns` - use `pd.drop` or `select`\r\n- [x] `filter_on` - use `query` or `select`\r\n- [x] `fill_direction` - use `transform_columns` or `pd.DataFrame.assign`\r\n- [x] `groupby_agg` - use `transform_columns` - once `by` is implemented\r\n- [x] `then` - use `pd.DataFrame.pipe`\r\n- [x] `to_datetime` - use `jn.transform_columns`\r\n- [x] `pivot_wider` - use `pd.DataFrame.pivot`\n", "before_files": [{"content": "\"\"\"Implementation source for `then`.\"\"\"\nfrom typing import Callable\nimport pandas_flavor as pf\nimport pandas as pd\n\n\[email protected]_dataframe_method\ndef then(df: pd.DataFrame, func: Callable) -> pd.DataFrame:\n \"\"\"Add an arbitrary function to run in the `pyjanitor` method chain.\n\n This method does not mutate the original DataFrame.\n\n Examples:\n A trivial example using a lambda `func`.\n\n >>> import pandas as pd\n >>> import janitor\n >>> (pd.DataFrame({\"a\": [1, 2, 3], \"b\": [7, 8, 9]})\n ... .then(lambda df: df * 2))\n a b\n 0 2 14\n 1 4 16\n 2 6 18\n\n Args:\n df: A pandas DataFrame.\n func: A function you would like to run in the method chain.\n It should take one parameter and return one parameter, each being\n the DataFrame object. After that, do whatever you want in the\n middle. Go crazy.\n\n Returns:\n A pandas DataFrame.\n \"\"\"\n df = func(df)\n return df\n", "path": "janitor/functions/then.py"}]} | 1,091 | 224 |
gh_patches_debug_12049 | rasdani/github-patches | git_diff | nf-core__tools-1755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use of future functionality
### Description of the bug
The `shutil.copytree` argument [`dirs_exist_ok`](https://docs.python.org/3/library/shutil.html#shutil.copytree) is only available in Python 3.8+, but nf-core/tools still supports Python 3.7:
https://github.com/nf-core/tools/blob/b5400d84d307343353b6ab09aad204231f74fb0e/nf_core/modules/lint/module_changes.py#L28
Two options: drop Python 3.7 support or change that code.
</issue>
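One Python 3.7-compatible alternative for the "change that code" option is to copy into a destination that does not exist yet, which avoids the 3.8-only `dirs_exist_ok` flag entirely. A hedged sketch (the paths and the subdirectory name are hypothetical):

```python
import shutil
import tempfile
from pathlib import Path

# mkdtemp() creates the directory itself, so copy into a fresh subpath instead:
# on Python 3.7, shutil.copytree() requires that the destination does not exist.
tempdir = Path(tempfile.mkdtemp()) / "module"
shutil.copytree("path/to/module_dir", tempdir)  # hypothetical source path
```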
<code>
[start of nf_core/modules/lint/module_changes.py]
1 """
2 Check whether the content of a module has changed compared to the original repository
3 """
4 import shutil
5 import tempfile
6 from pathlib import Path
7
8 from nf_core.modules.modules_differ import ModulesDiffer
9
10
11 def module_changes(module_lint_object, module):
12 """
13 Checks whether installed nf-core modules have changed compared to the
14 original repository
15
16 Downloads the ``main.nf`` and ``meta.yml`` files for every module
17 and compares them to the local copies
18
19 If the module has a commit SHA entry in the ``modules.json``, the file content is
20 compared against the files in the remote at this SHA.
21
22 Only runs when linting a pipeline, not the modules repository
23 """
24 if module.is_patched:
25 # If the module is patched, we need to apply
26 # the patch in reverse before comparing with the remote
27 tempdir = Path(tempfile.mkdtemp())
28 shutil.copytree(module.module_dir, tempdir, dirs_exist_ok=True)
29 try:
30 new_lines = ModulesDiffer.try_apply_patch(
31 module.module_name, module_lint_object.modules_repo.fullname, module.patch_path, tempdir, reverse=True
32 )
33 for file, lines in new_lines.items():
34 with open(tempdir / file, "w") as fh:
35 fh.writelines(lines)
36 except LookupError:
37 # This error is already reported by module_patch, so just return
38 return
39 else:
40 tempdir = module.module_dir
41
42 for f, same in module_lint_object.modules_repo.module_files_identical(
43 module.module_name, tempdir, module.git_sha
44 ).items():
45 if same:
46 module.passed.append(
47 (
48 "check_local_copy",
49 "Local copy of module up to date",
50 f"{Path(module.module_dir, f)}",
51 )
52 )
53 else:
54 module.failed.append(
55 (
56 "check_local_copy",
57 "Local copy of module does not match remote",
58 f"{Path(module.module_dir, f)}",
59 )
60 )
61
[end of nf_core/modules/lint/module_changes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py
--- a/nf_core/modules/lint/module_changes.py
+++ b/nf_core/modules/lint/module_changes.py
@@ -25,7 +25,7 @@
# If the module is patched, we need to apply
# the patch in reverse before comparing with the remote
tempdir = Path(tempfile.mkdtemp())
- shutil.copytree(module.module_dir, tempdir, dirs_exist_ok=True)
+ shutil.copytree(module.module_dir, tempdir)
try:
new_lines = ModulesDiffer.try_apply_patch(
module.module_name, module_lint_object.modules_repo.fullname, module.patch_path, tempdir, reverse=True
| {"golden_diff": "diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\n--- a/nf_core/modules/lint/module_changes.py\n+++ b/nf_core/modules/lint/module_changes.py\n@@ -25,7 +25,7 @@\n # If the module is patched, we need to apply\n # the patch in reverse before comparing with the remote\n tempdir = Path(tempfile.mkdtemp())\n- shutil.copytree(module.module_dir, tempdir, dirs_exist_ok=True)\n+ shutil.copytree(module.module_dir, tempdir)\n try:\n new_lines = ModulesDiffer.try_apply_patch(\n module.module_name, module_lint_object.modules_repo.fullname, module.patch_path, tempdir, reverse=True\n", "issue": "Use of future functionality\n### Description of the bug\r\n\r\nThe `shutil.copytree` argument [`dirs_exist_ok`](https://docs.python.org/3/library/shutil.html#shutil.copytree) is only available in Python version 3.8+ but nf-core tool still support Python 3.7\r\n\r\nhttps://github.com/nf-core/tools/blob/b5400d84d307343353b6ab09aad204231f74fb0e/nf_core/modules/lint/module_changes.py#L28\r\n\r\nTwo options: drop Python 3.7 support or change that code.\r\n\n", "before_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport shutil\nimport tempfile\nfrom pathlib import Path\n\nfrom nf_core.modules.modules_differ import ModulesDiffer\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n\n Downloads the ``main.nf`` and ``meta.yml`` files for every module\n and compares them to the local copies\n\n If the module has a commit SHA entry in the ``modules.json``, the file content is\n compared against the files in the remote at this SHA.\n\n Only runs when linting a pipeline, not the modules repository\n \"\"\"\n if module.is_patched:\n # If the module is patched, we need to apply\n # the patch in reverse before comparing with the remote\n tempdir = Path(tempfile.mkdtemp())\n shutil.copytree(module.module_dir, tempdir, dirs_exist_ok=True)\n try:\n new_lines = ModulesDiffer.try_apply_patch(\n module.module_name, module_lint_object.modules_repo.fullname, module.patch_path, tempdir, reverse=True\n )\n for file, lines in new_lines.items():\n with open(tempdir / file, \"w\") as fh:\n fh.writelines(lines)\n except LookupError:\n # This error is already reported by module_patch, so just return\n return\n else:\n tempdir = module.module_dir\n\n for f, same in module_lint_object.modules_repo.module_files_identical(\n module.module_name, tempdir, module.git_sha\n ).items():\n if same:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{Path(module.module_dir, f)}\",\n )\n )\n else:\n module.failed.append(\n (\n \"check_local_copy\",\n \"Local copy of module does not match remote\",\n f\"{Path(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}]} | 1,227 | 164 |
gh_patches_debug_2117 | rasdani/github-patches | git_diff | streamlit__streamlit-1469 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spelling mistake while running `streamlit hello`, DataFrame Demo
# Summary
I noticed a spelling mistake in the DataFrame demo while running `streamlit hello`: it displays "UN Data Exlorer" instead of "UN Data Explorer".
# Steps to reproduce
1. Go to terminal
2. Run `streamlit hello`
3. Open browser at localhost:8501 and choose dataframe demo
## Expected behavior:
It should display the correct spelling: `(Data courtesy of the UN Data Explorer.)`
## Actual behavior:
It's displaying `(Data courtesy of the UN Data Exlorer.)`

## Is this a regression?
no
# Debug info
- Streamlit version: 0.57.3
- Python version: 3.8.2
- Using Conda? PipEnv? PyEnv? Pex? Conda
- OS version: Windows 10
- Browser version: Chrome v81.0
# Additional information
If needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!
</issue>
<code>
[start of lib/streamlit/hello/hello.py]
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import inspect
16 import textwrap
17 from collections import OrderedDict
18
19 import streamlit as st
20 from streamlit.logger import get_logger
21 from streamlit.hello import demos
22
23 LOGGER = get_logger(__name__)
24
25 # Dictionary of
26 # demo_name -> (demo_function, demo_description)
27 DEMOS = OrderedDict(
28 [
29 ("—", (demos.intro, None)),
30 (
31 "Animation Demo",
32 (
33 demos.fractal_demo,
34 """
35 This app shows how you can use Streamlit to build cool animations.
36 It displays an animated fractal based on the the Julia Set. Use the slider
37 to tune different parameters.
38 """,
39 ),
40 ),
41 (
42 "Plotting Demo",
43 (
44 demos.plotting_demo,
45 """
46 This demo illustrates a combination of plotting and animation with
47 Streamlit. We're generating a bunch of random numbers in a loop for around
48 5 seconds. Enjoy!
49 """,
50 ),
51 ),
52 (
53 "Mapping Demo",
54 (
55 demos.mapping_demo,
56 """
57 This demo shows how to use
58 [`st.deck_gl_chart`](https://docs.streamlit.io/api.html#streamlit.deck_gl_chart)
59 to display geospatial data.
60 """,
61 ),
62 ),
63 (
64 "DataFrame Demo",
65 (
66 demos.data_frame_demo,
67 """
68 This demo shows how to use `st.write` to visualize Pandas DataFrames.
69
70 (Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).)
71 """,
72 ),
73 ),
74 ]
75 )
76
77
78 def run():
79 demo_name = st.sidebar.selectbox("Choose a demo", list(DEMOS.keys()), 0)
80 demo = DEMOS[demo_name][0]
81
82 if demo_name == "—":
83 show_code = False
84 st.write("# Welcome to Streamlit! 👋")
85 else:
86 show_code = st.sidebar.checkbox("Show code", True)
87 st.markdown("# %s" % demo_name)
88 description = DEMOS[demo_name][1]
89 if description:
90 st.write(description)
91 # Clear everything from the intro page.
92 # We only have 4 elements in the page so this is intentional overkill.
93 for i in range(10):
94 st.empty()
95
96 demo()
97
98 if show_code:
99 st.markdown("## Code")
100 sourcelines, _ = inspect.getsourcelines(demo)
101 st.code(textwrap.dedent("".join(sourcelines[1:])))
102
103
104 if __name__ == "__main__":
105 run()
106
[end of lib/streamlit/hello/hello.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/hello/hello.py b/lib/streamlit/hello/hello.py
--- a/lib/streamlit/hello/hello.py
+++ b/lib/streamlit/hello/hello.py
@@ -67,7 +67,7 @@
"""
This demo shows how to use `st.write` to visualize Pandas DataFrames.
-(Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).)
+(Data courtesy of the [UN Data Explorer](http://data.un.org/Explorer.aspx).)
""",
),
),
| {"golden_diff": "diff --git a/lib/streamlit/hello/hello.py b/lib/streamlit/hello/hello.py\n--- a/lib/streamlit/hello/hello.py\n+++ b/lib/streamlit/hello/hello.py\n@@ -67,7 +67,7 @@\n \"\"\"\n This demo shows how to use `st.write` to visualize Pandas DataFrames.\n \n-(Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).)\n+(Data courtesy of the [UN Data Explorer](http://data.un.org/Explorer.aspx).)\n \"\"\",\n ),\n ),\n", "issue": "Spelling mistake while running streamlit hello , DataFrame Demo\n# Summary\r\n\r\nI noticed a spelling mistake in dataframe demo while runing streamlit hello , It displays UN Data Exlorer instead of UN Data Explorer\r\n\r\n# Steps to reproduce\r\n\r\n1. Go to terminal\r\n2. Run `streamlit hello`\r\n3. Open browser at localhost:8501 and choose dataframe demo\r\n\r\n## Expected behavior:\r\n\r\nIt should display correct spelling as `(Data courtesy of the UN Data Exlporer.)`\r\n\r\n## Actual behavior:\r\n\r\nIt's displaying `(Data courtesy of the UN Data Exlorer.)`\r\n\r\n\r\n\r\n## Is this a regression?\r\n no\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.57.3\r\n- Python version: 3.8.2\r\n- Using Conda? PipEnv? PyEnv? Pex? Conda\r\n- OS version: Windows 10\r\n- Browser version: Chrome v81.0\r\n\r\n# Additional information\r\n\r\nIf needed, add any other context about the problem here. For example, did this bug come from https://discuss.streamlit.io or another site? Link the original source here!\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport textwrap\nfrom collections import OrderedDict\n\nimport streamlit as st\nfrom streamlit.logger import get_logger\nfrom streamlit.hello import demos\n\nLOGGER = get_logger(__name__)\n\n# Dictionary of\n# demo_name -> (demo_function, demo_description)\nDEMOS = OrderedDict(\n [\n (\"\u2014\", (demos.intro, None)),\n (\n \"Animation Demo\",\n (\n demos.fractal_demo,\n \"\"\"\nThis app shows how you can use Streamlit to build cool animations.\nIt displays an animated fractal based on the the Julia Set. Use the slider\nto tune different parameters.\n\"\"\",\n ),\n ),\n (\n \"Plotting Demo\",\n (\n demos.plotting_demo,\n \"\"\"\nThis demo illustrates a combination of plotting and animation with\nStreamlit. We're generating a bunch of random numbers in a loop for around\n5 seconds. 
Enjoy!\n\"\"\",\n ),\n ),\n (\n \"Mapping Demo\",\n (\n demos.mapping_demo,\n \"\"\"\nThis demo shows how to use\n[`st.deck_gl_chart`](https://docs.streamlit.io/api.html#streamlit.deck_gl_chart)\nto display geospatial data.\n\"\"\",\n ),\n ),\n (\n \"DataFrame Demo\",\n (\n demos.data_frame_demo,\n \"\"\"\nThis demo shows how to use `st.write` to visualize Pandas DataFrames.\n\n(Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).)\n\"\"\",\n ),\n ),\n ]\n)\n\n\ndef run():\n demo_name = st.sidebar.selectbox(\"Choose a demo\", list(DEMOS.keys()), 0)\n demo = DEMOS[demo_name][0]\n\n if demo_name == \"\u2014\":\n show_code = False\n st.write(\"# Welcome to Streamlit! \ud83d\udc4b\")\n else:\n show_code = st.sidebar.checkbox(\"Show code\", True)\n st.markdown(\"# %s\" % demo_name)\n description = DEMOS[demo_name][1]\n if description:\n st.write(description)\n # Clear everything from the intro page.\n # We only have 4 elements in the page so this is intentional overkill.\n for i in range(10):\n st.empty()\n\n demo()\n\n if show_code:\n st.markdown(\"## Code\")\n sourcelines, _ = inspect.getsourcelines(demo)\n st.code(textwrap.dedent(\"\".join(sourcelines[1:])))\n\n\nif __name__ == \"__main__\":\n run()\n", "path": "lib/streamlit/hello/hello.py"}]} | 1,725 | 125 |
gh_patches_debug_3743 | rasdani/github-patches | git_diff | openai__gym-2576 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Proposal] Add license to the license field in the setup.py file
### Proposal
Add license to the license field in the setup.py file.
### Motivation
The license field is defined but not completed in the setup.py file.
Some package dependency scanners will block the usage of this package as the license is not specified.
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
</issue>
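A hedged sketch of what completing the field might look like, assuming the repository's existing LICENSE is the one to declare (the diff later in this record uses MIT):

```python
from setuptools import setup

setup(
    name="gym",
    license="MIT",  # assumption: matches the repository's LICENSE file
    # ... remaining arguments unchanged ...
)
```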
<code>
[start of setup.py]
1 import os.path
2 import sys
3 import itertools
4
5 from setuptools import find_packages, setup
6
7 # Don't import gym module here, since deps may not be installed
8 sys.path.insert(0, os.path.join(os.path.dirname(__file__), "gym"))
9 from version import VERSION
10
11 # Environment-specific dependencies.
12 extras = {
13 "atari": ["ale-py~=0.7.1"],
14 "accept-rom-license": ["autorom[accept-rom-license]~=0.4.2"],
15 "box2d": ["box2d-py==2.3.5", "pyglet>=1.4.0"],
16 "classic_control": ["pyglet>=1.4.0"],
17 "mujoco": ["mujoco_py>=1.50, <2.0"],
18 "toy_text": ["pygame==2.1.0", "scipy>=1.4.1"],
19 "other": ["lz4>=3.1.0", "opencv-python>=3.0"],
20 }
21
22 # Meta dependency groups.
23 nomujoco_blacklist = set(["mujoco", "accept-rom-license", "atari"])
24 nomujoco_groups = set(extras.keys()) - nomujoco_blacklist
25
26 extras["nomujoco"] = list(
27 itertools.chain.from_iterable(map(lambda group: extras[group], nomujoco_groups))
28 )
29
30
31 all_blacklist = set(["accept-rom-license"])
32 all_groups = set(extras.keys()) - all_blacklist
33
34 extras["all"] = list(
35 itertools.chain.from_iterable(map(lambda group: extras[group], all_groups))
36 )
37
38 setup(
39 name="gym",
40 version=VERSION,
41 description="Gym: A universal API for reinforcement learning environments.",
42 url="https://github.com/openai/gym",
43 author="Gym Community",
44 author_email="[email protected]",
45 license="",
46 packages=[package for package in find_packages() if package.startswith("gym")],
47 zip_safe=False,
48 install_requires=[
49 "numpy>=1.18.0",
50 "cloudpickle>=1.2.0",
51 "importlib_metadata>=4.10.0; python_version < '3.10'",
52 ],
53 extras_require=extras,
54 package_data={
55 "gym": [
56 "envs/mujoco/assets/*.xml",
57 "envs/classic_control/assets/*.png",
58 "envs/toy_text/font/*.ttf",
59 "envs/toy_text/img/*.png",
60 ]
61 },
62 tests_require=["pytest", "mock"],
63 python_requires=">=3.7",
64 classifiers=[
65 "Programming Language :: Python :: 3",
66 "Programming Language :: Python :: 3.7",
67 "Programming Language :: Python :: 3.8",
68 "Programming Language :: Python :: 3.9",
69 "Programming Language :: Python :: 3.10",
70 ],
71 )
72
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
url="https://github.com/openai/gym",
author="Gym Community",
author_email="[email protected]",
- license="",
+ license="MIT",
packages=[package for package in find_packages() if package.startswith("gym")],
zip_safe=False,
install_requires=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n url=\"https://github.com/openai/gym\",\n author=\"Gym Community\",\n author_email=\"[email protected]\",\n- license=\"\",\n+ license=\"MIT\",\n packages=[package for package in find_packages() if package.startswith(\"gym\")],\n zip_safe=False,\n install_requires=[\n", "issue": "[Proposal] Add license to the license field in the setup.py file\n### Proposal \r\n\r\nAdd license to the license field in the setup.py file.\r\n\r\n### Motivation\r\n\r\nThe license field is defined but not completed in the setup.py file. \r\n\r\nSome package dependency scanners will block the usage of this package as the license is not specified.\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "import os.path\nimport sys\nimport itertools\n\nfrom setuptools import find_packages, setup\n\n# Don't import gym module here, since deps may not be installed\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"gym\"))\nfrom version import VERSION\n\n# Environment-specific dependencies.\nextras = {\n \"atari\": [\"ale-py~=0.7.1\"],\n \"accept-rom-license\": [\"autorom[accept-rom-license]~=0.4.2\"],\n \"box2d\": [\"box2d-py==2.3.5\", \"pyglet>=1.4.0\"],\n \"classic_control\": [\"pyglet>=1.4.0\"],\n \"mujoco\": [\"mujoco_py>=1.50, <2.0\"],\n \"toy_text\": [\"pygame==2.1.0\", \"scipy>=1.4.1\"],\n \"other\": [\"lz4>=3.1.0\", \"opencv-python>=3.0\"],\n}\n\n# Meta dependency groups.\nnomujoco_blacklist = set([\"mujoco\", \"accept-rom-license\", \"atari\"])\nnomujoco_groups = set(extras.keys()) - nomujoco_blacklist\n\nextras[\"nomujoco\"] = list(\n itertools.chain.from_iterable(map(lambda group: extras[group], nomujoco_groups))\n)\n\n\nall_blacklist = set([\"accept-rom-license\"])\nall_groups = set(extras.keys()) - all_blacklist\n\nextras[\"all\"] = list(\n itertools.chain.from_iterable(map(lambda group: extras[group], all_groups))\n)\n\nsetup(\n name=\"gym\",\n version=VERSION,\n description=\"Gym: A universal API for reinforcement learning environments.\",\n url=\"https://github.com/openai/gym\",\n author=\"Gym Community\",\n author_email=\"[email protected]\",\n license=\"\",\n packages=[package for package in find_packages() if package.startswith(\"gym\")],\n zip_safe=False,\n install_requires=[\n \"numpy>=1.18.0\",\n \"cloudpickle>=1.2.0\",\n \"importlib_metadata>=4.10.0; python_version < '3.10'\",\n ],\n extras_require=extras,\n package_data={\n \"gym\": [\n \"envs/mujoco/assets/*.xml\",\n \"envs/classic_control/assets/*.png\",\n \"envs/toy_text/font/*.ttf\",\n \"envs/toy_text/img/*.png\",\n ]\n },\n tests_require=[\"pytest\", \"mock\"],\n python_requires=\">=3.7\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]} | 1,398 | 101 |
gh_patches_debug_4907 | rasdani/github-patches | git_diff | Mailu__Mailu-1487 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
We need to check the (persistent) postfix mailqueue permissions
As the postfix and postdrop uid/gid might change (especially with a change of base image), the directory permissions on /queue need to be checked before starting Postfix.
</issue>
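A minimal sketch of the idea, assuming the stock `postfix set-permissions` maintenance command is available in the image (it repairs ownership of the queue and spool directories):

```python
import os

# Let Postfix fix ownership of its persistent queue directories before it starts,
# in case the postfix/postdrop uid/gid changed with a new base image.
os.system("postfix set-permissions")
os.system("postfix start-fg")
```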
<code>
[start of core/postfix/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9
10 from podop import run_server
11 from socrate import system, conf
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("recipientmap", "url", url + "recipient/map/§"),
25 ("sendermap", "url", url + "sender/map/§"),
26 ("senderaccess", "url", url + "sender/access/§"),
27 ("senderlogin", "url", url + "sender/login/§")
28 ])
29
30 def is_valid_postconf_line(line):
31 return not line.startswith("#") \
32 and not line == ''
33
34 # Actual startup script
35 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
36 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
37 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
38 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
39
40 for postfix_file in glob.glob("/conf/*.cf"):
41 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
42
43 if os.path.exists("/overrides/postfix.cf"):
44 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
45 if is_valid_postconf_line(line):
46 os.system('postconf -e "{}"'.format(line))
47
48 if os.path.exists("/overrides/postfix.master"):
49 for line in open("/overrides/postfix.master").read().strip().split("\n"):
50 if is_valid_postconf_line(line):
51 os.system('postconf -Me "{}"'.format(line))
52
53 for map_file in glob.glob("/overrides/*.map"):
54 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
55 shutil.copyfile(map_file, destination)
56 os.system("postmap {}".format(destination))
57 os.remove(destination)
58
59 if "RELAYUSER" in os.environ:
60 path = "/etc/postfix/sasl_passwd"
61 conf.jinja("/conf/sasl_passwd", os.environ, path)
62 os.system("postmap {}".format(path))
63
64 # Run Podop and Postfix
65 multiprocessing.Process(target=start_podop).start()
66 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
67 os.system("postfix start-fg")
68
[end of core/postfix/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -64,4 +64,7 @@
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
+# Before starting postfix, we need to check permissions on /queue
+# in the event that postfix,postdrop id have changed
+os.system("postfix set-permissions")
os.system("postfix start-fg")
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -64,4 +64,7 @@\n # Run Podop and Postfix\n multiprocessing.Process(target=start_podop).start()\n os.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n+# Before starting postfix, we need to check permissions on /queue\n+# in the event that postfix,postdrop id have changed\n+os.system(\"postfix set-permissions\")\n os.system(\"postfix start-fg\")\n", "issue": "We need to check the (persistent) postfix mailqueue permissions\nAs the postfix and postdrop uid/gid might change (especially with a change of base image), it is needed to check the directory permissions on /queue before starting postfix\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 1,384 | 128 |
gh_patches_debug_24129 | rasdani/github-patches | git_diff | fossasia__open-event-server-7875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete space in front of colons
Some text pieces coming from the server have a space in front of colons (" :"). Please check all text, including email texts, and delete the space in front of colons.
</issue>
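As a concrete illustration, the interactive prompts in the `create_db.py` file shown below would then read as follows (a sketch of the corrected strings only):

```python
import getpass

email = input("Enter email for super_admin: ")                      # no space before ':'
password = getpass.getpass("Enter password for super_admin: ")
repassword = getpass.getpass("Enter your password again to confirm: ")
```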
<code>
[start of create_db.py]
1 import argparse
2 import getpass
3 import re
4
5 from flask_migrate import stamp
6
7 from app.instance import current_app
8 from app.models import db
9 from populate_db import populate
10 from tests.all.integration.auth_helper import create_super_admin
11
12
13 def create_default_user(email, password):
14 print("Your login is 'super_admin'.")
15 if not email:
16 ask_email = True
17 while ask_email:
18 email = input("Enter email for super_admin : ")
19 if not re.match(r'[^@]+@[^@]+\.[^@]+', email):
20 print('\nInvalid email address\n')
21 continue
22 ask_email = False
23 if not password:
24 ask_password = True
25 while ask_password:
26 password = getpass.getpass("Enter password for super_admin : ")
27 if len(password) < 8:
28 print('\nPassword should have minimum 8 characters')
29 continue
30 repassword = getpass.getpass("Enter your password again to confirm : ")
31 if password != repassword:
32 print('\nPassword did not match')
33 continue
34 ask_password = False
35 create_super_admin(email, password)
36
37
38 if __name__ == "__main__":
39 parser = argparse.ArgumentParser()
40 parser.add_argument("email", nargs='?', help="The email for super_admin.", default='')
41 parser.add_argument(
42 "password", nargs='?', help="The password for super_admin.", default=''
43 )
44 parsed = parser.parse_args()
45 with current_app.app_context():
46 db.engine.execute('create extension if not exists citext')
47 db.create_all()
48 stamp()
49 create_default_user(parsed.email, parsed.password)
50 populate()
51
[end of create_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/create_db.py b/create_db.py
--- a/create_db.py
+++ b/create_db.py
@@ -15,7 +15,7 @@
if not email:
ask_email = True
while ask_email:
- email = input("Enter email for super_admin : ")
+ email = input("Enter email for super_admin: ")
if not re.match(r'[^@]+@[^@]+\.[^@]+', email):
print('\nInvalid email address\n')
continue
@@ -23,11 +23,11 @@
if not password:
ask_password = True
while ask_password:
- password = getpass.getpass("Enter password for super_admin : ")
+ password = getpass.getpass("Enter password for super_admin: ")
if len(password) < 8:
print('\nPassword should have minimum 8 characters')
continue
- repassword = getpass.getpass("Enter your password again to confirm : ")
+ repassword = getpass.getpass("Enter your password again to confirm: ")
if password != repassword:
print('\nPassword did not match')
continue
| {"golden_diff": "diff --git a/create_db.py b/create_db.py\n--- a/create_db.py\n+++ b/create_db.py\n@@ -15,7 +15,7 @@\n if not email:\n ask_email = True\n while ask_email:\n- email = input(\"Enter email for super_admin : \")\n+ email = input(\"Enter email for super_admin: \")\n if not re.match(r'[^@]+@[^@]+\\.[^@]+', email):\n print('\\nInvalid email address\\n')\n continue\n@@ -23,11 +23,11 @@\n if not password:\n ask_password = True\n while ask_password:\n- password = getpass.getpass(\"Enter password for super_admin : \")\n+ password = getpass.getpass(\"Enter password for super_admin: \")\n if len(password) < 8:\n print('\\nPassword should have minimum 8 characters')\n continue\n- repassword = getpass.getpass(\"Enter your password again to confirm : \")\n+ repassword = getpass.getpass(\"Enter your password again to confirm: \")\n if password != repassword:\n print('\\nPassword did not match')\n continue\n", "issue": "Delete space in front of colons\nSome text pieces coming from the server has a space in front of colons \" :\". Please check all text including email texts and delete the space in front of colons.\n", "before_files": [{"content": "import argparse\nimport getpass\nimport re\n\nfrom flask_migrate import stamp\n\nfrom app.instance import current_app\nfrom app.models import db\nfrom populate_db import populate\nfrom tests.all.integration.auth_helper import create_super_admin\n\n\ndef create_default_user(email, password):\n print(\"Your login is 'super_admin'.\")\n if not email:\n ask_email = True\n while ask_email:\n email = input(\"Enter email for super_admin : \")\n if not re.match(r'[^@]+@[^@]+\\.[^@]+', email):\n print('\\nInvalid email address\\n')\n continue\n ask_email = False\n if not password:\n ask_password = True\n while ask_password:\n password = getpass.getpass(\"Enter password for super_admin : \")\n if len(password) < 8:\n print('\\nPassword should have minimum 8 characters')\n continue\n repassword = getpass.getpass(\"Enter your password again to confirm : \")\n if password != repassword:\n print('\\nPassword did not match')\n continue\n ask_password = False\n create_super_admin(email, password)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"email\", nargs='?', help=\"The email for super_admin.\", default='')\n parser.add_argument(\n \"password\", nargs='?', help=\"The password for super_admin.\", default=''\n )\n parsed = parser.parse_args()\n with current_app.app_context():\n db.engine.execute('create extension if not exists citext')\n db.create_all()\n stamp()\n create_default_user(parsed.email, parsed.password)\n populate()\n", "path": "create_db.py"}]} | 1,019 | 254 |
gh_patches_debug_31190 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5890 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
@spider=zabka_pl opening hours are missing Sunday data, proposed `opening_hours` are broken
https://www.alltheplaces.xyz/map/#16.47/50.073227/20.037421

https://www.openstreetmap.org/node/4271289403
Opening hours in OSM (`Mo-Sa 06:00-22:00; Su 11:00-20:00`) are correct.
At https://www.zabka.pl/znajdz-sklep it refuses to show Sunday opening hours.
Maybe it is caused by their internal structure? These shops are franchises, and perhaps they are obligated to be open `Mo-Sa 06:00-22:00` while Sunday hours can be decided by each operator.
Overall it seems that Monday to Saturday data is likely correct.
</issue>
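If the Sunday hours really live in the per-shop `nonTradingDays` entries (as the comment in the spider code below suggests), a hedged sketch of how they could be folded into `opening_hours`. The field names are taken from the existing code; `location` and `item` are the objects used inside the spider's `parse()` method, and the rest is illustrative:

```python
from datetime import datetime


def add_sunday_hours(location: dict, item) -> None:
    """Fold the next upcoming Sunday from "nonTradingDays" into item["opening_hours"]."""
    for rule in location.get("nonTradingDays") or []:
        day = datetime.strptime(rule["date"], "%Y-%m-%d")
        if day.weekday() == 6 and day >= datetime.now():  # next upcoming Sunday
            item["opening_hours"].add_range("Su", rule["openTime"], rule["closeTime"])
            return
```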
<code>
[start of locations/spiders/zabka_pl.py]
1 import scrapy
2 from scrapy.http import JsonRequest
3
4 from locations.dict_parser import DictParser
5 from locations.hours import OpeningHours
6
7
8 class ZabkaPLSpider(scrapy.Spider):
9 name = "zabka_pl"
10 item_attributes = {"brand": "Żabka", "brand_wikidata": "Q2589061"}
11
12 # URL extracted by observing request made by Żappka Android app (using HTTP Toolkit)
13 start_urls = ["https://partner-api.zabkamobile.pl/v2/shops"]
14
15 def start_requests(self):
16 # Authorization header is hard-coded into the Żappka app and does not appear to change (as of version 3.14.10).
17 headers = {
18 "Authorization": "PartnerKey 424A0B7AD0E9EA136510474D89061BBDC007B9BE5256A638EA28CC19D2BB15CD",
19 }
20 yield JsonRequest(url=self.start_urls[0], headers=headers)
21
22 def parse(self, response):
23 for location in response.json():
24 item = DictParser.parse(location)
25 item["street_address"] = item.pop("addr_full", "")
26 # unset "state" field, it is taken from the "region" field which is some internal Żabka ID
27 item["state"] = None
28 item["opening_hours"] = OpeningHours()
29
30 # Each franchisee is required to be open Mon-Sat with the same hours
31 # But the hours for Sundays are set in the "nonTradingDays" field, which
32 # contains the opening hours for each specific Sunday.
33 item["opening_hours"].add_days_range(
34 ["Mo", "Tu", "We", "Th", "Fr", "Sa"], location["openTime"], location["closeTime"]
35 )
36 yield item
37
[end of locations/spiders/zabka_pl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/zabka_pl.py b/locations/spiders/zabka_pl.py
--- a/locations/spiders/zabka_pl.py
+++ b/locations/spiders/zabka_pl.py
@@ -1,3 +1,5 @@
+from datetime import datetime
+
import scrapy
from scrapy.http import JsonRequest
@@ -20,6 +22,7 @@
yield JsonRequest(url=self.start_urls[0], headers=headers)
def parse(self, response):
+ today = datetime.now()
for location in response.json():
item = DictParser.parse(location)
item["street_address"] = item.pop("addr_full", "")
@@ -33,4 +36,25 @@
item["opening_hours"].add_days_range(
["Mo", "Tu", "We", "Th", "Fr", "Sa"], location["openTime"], location["closeTime"]
)
+
+ if location["nonTradingDays"]:
+ sunday_open = None
+ sunday_close = None
+ for rule in location["nonTradingDays"]:
+ d = datetime.strptime(rule["date"], "%Y-%m-%d")
+ if d.weekday() != 6 or d < today:
+ continue # In the past, ignore
+ if sunday_open is None:
+ sunday_open = rule["openTime"]
+ sunday_close = rule["closeTime"]
+ else:
+ if sunday_open != rule["openTime"] or sunday_close != rule["closeTime"]:
+ self.crawler.stats.inc_value("atp/zabka_pl/nonTradingDays/mismatching")
+ break # Mismatching future Sundays, skip
+ else:
+ self.crawler.stats.inc_value("atp/zabka_pl/nonTradingDays/fine")
+ item["opening_hours"].add_range("Su", sunday_open, sunday_close)
+ else:
+ self.crawler.stats.inc_value("atp/zabka_pl/nonTradingDays/missing") # Sunday closed? Missing data?
+
yield item
| {"golden_diff": "diff --git a/locations/spiders/zabka_pl.py b/locations/spiders/zabka_pl.py\n--- a/locations/spiders/zabka_pl.py\n+++ b/locations/spiders/zabka_pl.py\n@@ -1,3 +1,5 @@\n+from datetime import datetime\n+\n import scrapy\n from scrapy.http import JsonRequest\n \n@@ -20,6 +22,7 @@\n yield JsonRequest(url=self.start_urls[0], headers=headers)\n \n def parse(self, response):\n+ today = datetime.now()\n for location in response.json():\n item = DictParser.parse(location)\n item[\"street_address\"] = item.pop(\"addr_full\", \"\")\n@@ -33,4 +36,25 @@\n item[\"opening_hours\"].add_days_range(\n [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\"], location[\"openTime\"], location[\"closeTime\"]\n )\n+\n+ if location[\"nonTradingDays\"]:\n+ sunday_open = None\n+ sunday_close = None\n+ for rule in location[\"nonTradingDays\"]:\n+ d = datetime.strptime(rule[\"date\"], \"%Y-%m-%d\")\n+ if d.weekday() != 6 or d < today:\n+ continue # In the past, ignore\n+ if sunday_open is None:\n+ sunday_open = rule[\"openTime\"]\n+ sunday_close = rule[\"closeTime\"]\n+ else:\n+ if sunday_open != rule[\"openTime\"] or sunday_close != rule[\"closeTime\"]:\n+ self.crawler.stats.inc_value(\"atp/zabka_pl/nonTradingDays/mismatching\")\n+ break # Mismatching future Sundays, skip\n+ else:\n+ self.crawler.stats.inc_value(\"atp/zabka_pl/nonTradingDays/fine\")\n+ item[\"opening_hours\"].add_range(\"Su\", sunday_open, sunday_close)\n+ else:\n+ self.crawler.stats.inc_value(\"atp/zabka_pl/nonTradingDays/missing\") # Sunday closed? Missing data?\n+\n yield item\n", "issue": "@spider=zabka_pl opening hours are missing Sunday data, proposed `opening_hours` are broken\nhttps://www.alltheplaces.xyz/map/#16.47/50.073227/20.037421\r\n\r\n\r\n\r\nhttps://www.openstreetmap.org/node/4271289403\r\n\r\nOpening hours in OSM (`Mo-Sa 06:00-22:00; Su 11:00-20:00`) are correct.\r\n\r\nAt https://www.zabka.pl/znajdz-sklep it refuses to show Sunday opening hours.\r\n\r\nMaybe it is caused by their internal structure? 
This shops are franchises and maybe they are obligated to be open `Mo-Sa 06:00-22:00` and Sundays can be decided by operator?\r\n\r\nOverall it seems that Monday to Saturday data is likely correct.\n", "before_files": [{"content": "import scrapy\nfrom scrapy.http import JsonRequest\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass ZabkaPLSpider(scrapy.Spider):\n name = \"zabka_pl\"\n item_attributes = {\"brand\": \"\u017babka\", \"brand_wikidata\": \"Q2589061\"}\n\n # URL extracted by observing request made by \u017bappka Android app (using HTTP Toolkit)\n start_urls = [\"https://partner-api.zabkamobile.pl/v2/shops\"]\n\n def start_requests(self):\n # Authorization header is hard-coded into the \u017bappka app and does not appear to change (as of version 3.14.10).\n headers = {\n \"Authorization\": \"PartnerKey 424A0B7AD0E9EA136510474D89061BBDC007B9BE5256A638EA28CC19D2BB15CD\",\n }\n yield JsonRequest(url=self.start_urls[0], headers=headers)\n\n def parse(self, response):\n for location in response.json():\n item = DictParser.parse(location)\n item[\"street_address\"] = item.pop(\"addr_full\", \"\")\n # unset \"state\" field, it is taken from the \"region\" field which is some internal \u017babka ID\n item[\"state\"] = None\n item[\"opening_hours\"] = OpeningHours()\n\n # Each franchisee is required to be open Mon-Sat with the same hours\n # But the hours for Sundays are set in the \"nonTradingDays\" field, which\n # contains the opening hours for each specific Sunday.\n item[\"opening_hours\"].add_days_range(\n [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\"], location[\"openTime\"], location[\"closeTime\"]\n )\n yield item\n", "path": "locations/spiders/zabka_pl.py"}]} | 1,273 | 444 |
gh_patches_debug_4671 | rasdani/github-patches | git_diff | holoviz__panel-609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replacing periodic.period doesn't change period
```
import panel as pn
pn.extension()
test = pn.widgets.Toggle(name='Test')
def toggle():
test.value = not test.value
periodic = test.add_periodic_callback(toggle, period=1000)
test
```
```
periodic.period = 20000  # this doesn't update the callback's period in milliseconds
```
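A sketch of the behaviour I'd expect instead — the callback re-registering itself whenever `period` is assigned (assuming a `param.depends` watcher on `PeriodicCallback` is acceptable):
```python
# sketch: an addition to panel.callbacks.PeriodicCallback
@param.depends('period', watch=True)
def _update_period(self):
    if self._cb is not None:  # a callback is currently running
        self.stop()           # tear down the old bokeh/tornado callback
        self.start()          # re-register it with the new self.period
```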
</issue>
<code>
[start of panel/callbacks.py]
1 """
2 Defines callbacks to be executed on a thread or by scheduling it
3 on a running bokeh server.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7
8 import time
9 import param
10
11 from bokeh.io import curdoc as _curdoc
12
13
14 class PeriodicCallback(param.Parameterized):
15 """
16 Periodic encapsulates a periodic callback which will run both
17 in tornado based notebook environments and on bokeh server. By
18 default the callback will run until the stop method is called,
19 but count and timeout values can be set to limit the number of
20 executions or the maximum length of time for which the callback
21 will run.
22 """
23
24 callback = param.Callable(doc="""
25 The callback to execute periodically.""")
26
27 count = param.Integer(default=None, doc="""
28 Number of times the callback will be executed, by default
29 this is unlimited.""")
30
31 period = param.Integer(default=500, doc="""
32 Period in milliseconds at which the callback is executed.""")
33
34 timeout = param.Integer(default=None, doc="""
35 Timeout in seconds from the start time at which the callback
36 expires""")
37
38 def __init__(self, **params):
39 super(PeriodicCallback, self).__init__(**params)
40 self._counter = 0
41 self._start_time = None
42 self._timeout = None
43 self._cb = None
44 self._doc = None
45
46 def start(self):
47 if self._cb is not None:
48 raise RuntimeError('Periodic callback has already started.')
49 self._start_time = time.time()
50 if _curdoc().session_context:
51 self._doc = _curdoc()
52 self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)
53 else:
54 from tornado.ioloop import PeriodicCallback
55 self._cb = PeriodicCallback(self._periodic_callback, self.period)
56 self._cb.start()
57
58 def _periodic_callback(self):
59 self.callback()
60 self._counter += 1
61 if self._timeout is not None:
62 dt = (time.time() - self._start_time)
63 if dt > self._timeout:
64 self.stop()
65 if self._counter == self.count:
66 self.stop()
67
68 def stop(self):
69 self._counter = 0
70 self._timeout = None
71 if self._doc:
72 self._doc.remove_periodic_callback(self._cb)
73 else:
74 self._cb.stop()
75 self._cb = None
76
77
[end of panel/callbacks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/callbacks.py b/panel/callbacks.py
--- a/panel/callbacks.py
+++ b/panel/callbacks.py
@@ -55,6 +55,12 @@
self._cb = PeriodicCallback(self._periodic_callback, self.period)
self._cb.start()
+ @param.depends('period', watch=True)
+ def _update_period(self):
+ if self._cb:
+ self.stop()
+ self.start()
+
def _periodic_callback(self):
self.callback()
self._counter += 1
| {"golden_diff": "diff --git a/panel/callbacks.py b/panel/callbacks.py\n--- a/panel/callbacks.py\n+++ b/panel/callbacks.py\n@@ -55,6 +55,12 @@\n self._cb = PeriodicCallback(self._periodic_callback, self.period)\n self._cb.start()\n \n+ @param.depends('period', watch=True)\n+ def _update_period(self):\n+ if self._cb:\n+ self.stop()\n+ self.start()\n+\n def _periodic_callback(self):\n self.callback()\n self._counter += 1\n", "issue": "Replacing periodic.period doesn't change period\n```\r\nimport panel as pn\r\npn.extension()\r\n\r\ntest = pn.widgets.Toggle(name='Test')\r\n\r\ndef toggle():\r\n test.value = not test.value\r\n\r\nperiodic = test.add_periodic_callback(toggle, period=1000)\r\ntest\r\n```\r\n\r\n```\r\nperiodic.period = 20000 #this doesnt update the periodic milliseconds\r\n```\n", "before_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\n\nimport time\nimport param\n\nfrom bokeh.io import curdoc as _curdoc\n\n\nclass PeriodicCallback(param.Parameterized):\n \"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in seconds from the start time at which the callback\n expires\"\"\")\n\n def __init__(self, **params):\n super(PeriodicCallback, self).__init__(**params)\n self._counter = 0\n self._start_time = None\n self._timeout = None\n self._cb = None\n self._doc = None\n\n def start(self):\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n self._start_time = time.time()\n if _curdoc().session_context:\n self._doc = _curdoc()\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(self._periodic_callback, self.period)\n self._cb.start()\n\n def _periodic_callback(self):\n self.callback()\n self._counter += 1\n if self._timeout is not None:\n dt = (time.time() - self._start_time)\n if dt > self._timeout:\n self.stop()\n if self._counter == self.count:\n self.stop()\n\n def stop(self):\n self._counter = 0\n self._timeout = None\n if self._doc:\n self._doc.remove_periodic_callback(self._cb)\n else:\n self._cb.stop()\n self._cb = None\n\n", "path": "panel/callbacks.py"}]} | 1,305 | 128 |
gh_patches_debug_36254 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Author a full getting started guide
As part of the final beta announce on March 30th, we should have a full getting started guide, similar to this one for js:
https://github.com/open-telemetry/opentelemetry-js/blob/master/getting-started/README.md
</issue>
<code>
[start of docs/trace_example.py]
1 from opentelemetry import trace
2 from opentelemetry.sdk.trace import TracerProvider
3 from opentelemetry.sdk.trace.export import (
4 ConsoleSpanExporter,
5 SimpleExportSpanProcessor,
6 )
7
8 trace.set_preferred_tracer_provider_implementation(lambda T: TracerProvider())
9 trace.tracer_provider().add_span_processor(
10 SimpleExportSpanProcessor(ConsoleSpanExporter())
11 )
12
13 tracer = trace.get_tracer(__name__)
14
15 with tracer.start_as_current_span("foo"):
16 with tracer.start_as_current_span("bar"):
17 with tracer.start_as_current_span("baz"):
18 print("Hello world from OpenTelemetry Python!")
19
[end of docs/trace_example.py]
[start of docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py]
1 # Copyright 2019, OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 """
16 This module serves as an example to integrate with flask, using
17 the requests library to perform downstream requests
18 """
19 import flask
20 import pkg_resources
21 import requests
22
23 import opentelemetry.ext.http_requests
24 from opentelemetry import trace
25 from opentelemetry.ext.flask import instrument_app
26 from opentelemetry.sdk.trace import TracerProvider
27
28
29 def configure_opentelemetry(flask_app: flask.Flask):
30 """Configure a flask application to use OpenTelemetry.
31
32 This activates the specific components:
33
34 * sets tracer to the SDK's Tracer
35 * enables requests integration on the Tracer
36 * uses a WSGI middleware to enable configuration
37 """
38 # Start by configuring all objects required to ensure a complete end to end
39 # workflow.
40 trace.set_tracer_provider(TracerProvider())
41
42 # Next, we need to configure how the values that are used by traces and
43 # metrics are propagated (such as what specific headers carry this value).
44 # Integrations are the glue that binds the OpenTelemetry API and the
45 # frameworks and libraries that are used together, automatically creating
46 # Spans and propagating context as appropriate.
47 opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())
48 instrument_app(flask_app)
49
50
51 app = flask.Flask(__name__)
52
53
54 @app.route("/")
55 def hello():
56 # Emit a trace that measures how long the sleep takes
57 version = pkg_resources.get_distribution(
58 "opentelemetry-example-app"
59 ).version
60 tracer = trace.get_tracer(__name__, version)
61 with tracer.start_as_current_span("example-request"):
62 requests.get("http://www.example.com")
63 return "hello"
64
65
66 configure_opentelemetry(app)
67
[end of docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py b/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py
--- a/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py
+++ b/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py
@@ -17,50 +17,31 @@
the requests library to perform downstream requests
"""
import flask
-import pkg_resources
import requests
import opentelemetry.ext.http_requests
from opentelemetry import trace
from opentelemetry.ext.flask import instrument_app
from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import ConsoleSpanExporter
+from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
-
-def configure_opentelemetry(flask_app: flask.Flask):
- """Configure a flask application to use OpenTelemetry.
-
- This activates the specific components:
-
- * sets tracer to the SDK's Tracer
- * enables requests integration on the Tracer
- * uses a WSGI middleware to enable configuration
- """
- # Start by configuring all objects required to ensure a complete end to end
- # workflow.
- trace.set_tracer_provider(TracerProvider())
-
- # Next, we need to configure how the values that are used by traces and
- # metrics are propagated (such as what specific headers carry this value).
- # Integrations are the glue that binds the OpenTelemetry API and the
- # frameworks and libraries that are used together, automatically creating
- # Spans and propagating context as appropriate.
- opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())
- instrument_app(flask_app)
-
+trace.set_tracer_provider(TracerProvider())
+trace.get_tracer_provider().add_span_processor(
+ SimpleExportSpanProcessor(ConsoleSpanExporter())
+)
app = flask.Flask(__name__)
+opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())
+instrument_app(app)
@app.route("/")
def hello():
- # Emit a trace that measures how long the sleep takes
- version = pkg_resources.get_distribution(
- "opentelemetry-example-app"
- ).version
- tracer = trace.get_tracer(__name__, version)
+ tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("example-request"):
requests.get("http://www.example.com")
return "hello"
-configure_opentelemetry(app)
+app.run(debug=True)
diff --git a/docs/trace_example.py b/docs/trace_example.py
--- a/docs/trace_example.py
+++ b/docs/trace_example.py
@@ -5,8 +5,8 @@
SimpleExportSpanProcessor,
)
-trace.set_preferred_tracer_provider_implementation(lambda T: TracerProvider())
-trace.tracer_provider().add_span_processor(
+trace.set_tracer_provider(TracerProvider())
+trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(ConsoleSpanExporter())
)
| {"golden_diff": "diff --git a/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py b/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py\n--- a/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py\n+++ b/docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py\n@@ -17,50 +17,31 @@\n the requests library to perform downstream requests\n \"\"\"\n import flask\n-import pkg_resources\n import requests\n \n import opentelemetry.ext.http_requests\n from opentelemetry import trace\n from opentelemetry.ext.flask import instrument_app\n from opentelemetry.sdk.trace import TracerProvider\n+from opentelemetry.sdk.trace.export import ConsoleSpanExporter\n+from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor\n \n-\n-def configure_opentelemetry(flask_app: flask.Flask):\n- \"\"\"Configure a flask application to use OpenTelemetry.\n-\n- This activates the specific components:\n-\n- * sets tracer to the SDK's Tracer\n- * enables requests integration on the Tracer\n- * uses a WSGI middleware to enable configuration\n- \"\"\"\n- # Start by configuring all objects required to ensure a complete end to end\n- # workflow.\n- trace.set_tracer_provider(TracerProvider())\n-\n- # Next, we need to configure how the values that are used by traces and\n- # metrics are propagated (such as what specific headers carry this value).\n- # Integrations are the glue that binds the OpenTelemetry API and the\n- # frameworks and libraries that are used together, automatically creating\n- # Spans and propagating context as appropriate.\n- opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())\n- instrument_app(flask_app)\n-\n+trace.set_tracer_provider(TracerProvider())\n+trace.get_tracer_provider().add_span_processor(\n+ SimpleExportSpanProcessor(ConsoleSpanExporter())\n+)\n \n app = flask.Flask(__name__)\n+opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())\n+instrument_app(app)\n \n \n @app.route(\"/\")\n def hello():\n- # Emit a trace that measures how long the sleep takes\n- version = pkg_resources.get_distribution(\n- \"opentelemetry-example-app\"\n- ).version\n- tracer = trace.get_tracer(__name__, version)\n+ tracer = trace.get_tracer(__name__)\n with tracer.start_as_current_span(\"example-request\"):\n requests.get(\"http://www.example.com\")\n return \"hello\"\n \n \n-configure_opentelemetry(app)\n+app.run(debug=True)\ndiff --git a/docs/trace_example.py b/docs/trace_example.py\n--- a/docs/trace_example.py\n+++ b/docs/trace_example.py\n@@ -5,8 +5,8 @@\n SimpleExportSpanProcessor,\n )\n \n-trace.set_preferred_tracer_provider_implementation(lambda T: TracerProvider())\n-trace.tracer_provider().add_span_processor(\n+trace.set_tracer_provider(TracerProvider())\n+trace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n )\n", "issue": "Author a full getting started guide\nAs part of the final beta announce on March 30th, we should have a full getting started guide, similar to this one for js:\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-js/blob/master/getting-started/README.md\r\n\r\n\n", "before_files": [{"content": "from opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\ntrace.set_preferred_tracer_provider_implementation(lambda T: 
TracerProvider())\ntrace.tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/trace_example.py"}, {"content": "# Copyright 2019, OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module serves as an example to integrate with flask, using\nthe requests library to perform downstream requests\n\"\"\"\nimport flask\nimport pkg_resources\nimport requests\n\nimport opentelemetry.ext.http_requests\nfrom opentelemetry import trace\nfrom opentelemetry.ext.flask import instrument_app\nfrom opentelemetry.sdk.trace import TracerProvider\n\n\ndef configure_opentelemetry(flask_app: flask.Flask):\n \"\"\"Configure a flask application to use OpenTelemetry.\n\n This activates the specific components:\n\n * sets tracer to the SDK's Tracer\n * enables requests integration on the Tracer\n * uses a WSGI middleware to enable configuration\n \"\"\"\n # Start by configuring all objects required to ensure a complete end to end\n # workflow.\n trace.set_tracer_provider(TracerProvider())\n\n # Next, we need to configure how the values that are used by traces and\n # metrics are propagated (such as what specific headers carry this value).\n # Integrations are the glue that binds the OpenTelemetry API and the\n # frameworks and libraries that are used together, automatically creating\n # Spans and propagating context as appropriate.\n opentelemetry.ext.http_requests.enable(trace.get_tracer_provider())\n instrument_app(flask_app)\n\n\napp = flask.Flask(__name__)\n\n\[email protected](\"/\")\ndef hello():\n # Emit a trace that measures how long the sleep takes\n version = pkg_resources.get_distribution(\n \"opentelemetry-example-app\"\n ).version\n tracer = trace.get_tracer(__name__, version)\n with tracer.start_as_current_span(\"example-request\"):\n requests.get(\"http://www.example.com\")\n return \"hello\"\n\n\nconfigure_opentelemetry(app)\n", "path": "docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py"}]} | 1,418 | 672 |
gh_patches_debug_17216 | rasdani/github-patches | git_diff | medtagger__MedTagger-407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scans with low number of Slices are not properly loaded on Labeling page
## Current Behavior
When a Scan has fewer than 10 Slices, the Scan Viewer keeps displaying the spinner indicator and never gets to displaying the actual Scan.
## Expected Behavior
Scan should be displayed properly, no matter how many Slices it has.
## Steps to Reproduce the Problem
1. Upload less than 10 Slices.
2. Go to Labelling page.
 3. Voilà, the Scan is not displayed; instead you're presented with a spinner indicator that doesn't disappear.
## Additional comment (optional)
The reason this happens is that `SLICE_BATCH_SIZE` (`validation-page.component.ts`, Line 23) is fixed to 10. When we upload a Scan that has fewer than `SLICE_BATCH_SIZE` Slices, the method `slice.isLastInBatch()` (`marker-page.component.ts`, Line 101) will always return `false`, because it will never reach the slice with id = 9 (because there isn't one...). Therefore, the Slice download will never be marked as completed.
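A sketch of a possible backend-side fix — deriving the last-in-batch index from the Slices actually returned rather than from the requested `count` (against `service_web_socket.py`, assuming the generator is materialised into a list first):
```python
slices = list(business.get_slices_for_scan(scan_id, begin, count, orientation=orientation))
# Mark the batch end from what was actually fetched, not from the requested count,
# so a Scan with fewer than SLICE_BATCH_SIZE Slices still emits a last-in-batch Slice.
last_in_batch = begin if reversed_order else begin + len(slices) - 1
```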
</issue>
<code>
[start of backend/medtagger/api/scans/service_web_socket.py]
1 """Module responsible for definition of Scans service available via WebSockets."""
2 from typing import Dict
3
4 from flask_socketio import Namespace, emit
5
6 from medtagger.api import web_socket
7 from medtagger.database.models import SliceOrientation
8 from medtagger.types import ScanID
9 from medtagger.api.exceptions import InvalidArgumentsException
10 from medtagger.api.scans import business
11
12
13 class Slices(Namespace):
14 """WebSocket handler for /slices namespace."""
15
16 MAX_NUMBER_OF_SLICES_PER_REQUEST = 25
17
18 def on_request_slices(self, request: Dict) -> None:
19 """Handle slices request triggered by `request_slices` event."""
20 assert request.get('scan_id'), 'ScanID is required!'
21 scan_id = ScanID(str(request['scan_id']))
22 begin = max(0, request.get('begin', 0))
23 count = request.get('count', 1)
24 reversed_order = request.get('reversed', False)
25 orientation = request.get('orientation', SliceOrientation.Z.value)
26 self._raise_on_invalid_request_slices(count, orientation)
27
28 orientation = SliceOrientation[orientation]
29 slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)
30 slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)
31 last_in_batch = begin if reversed_order else begin + count - 1
32 for index, (_slice, image) in slices_to_send:
33 emit('slice', {
34 'scan_id': scan_id,
35 'index': begin + index,
36 'last_in_batch': last_in_batch,
37 'image': image,
38 })
39
40 def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:
41 """Validate incoming request and raise an exception if there are issues with given arguments.
42
43 :param count: number of slices that should be returned
44 :param orientation: Slice's orientation as a string
45 """
46 # Make sure that passed orientation is proper one
47 if orientation not in SliceOrientation.__members__:
48 raise InvalidArgumentsException('Invalid Slice orientation.')
49
50 # Make sure that nobody will fetch whole scan at once. It could freeze our backend application.
51 if count > self.MAX_NUMBER_OF_SLICES_PER_REQUEST:
52 message = 'Cannot return more than {} slices per request.'.format(self.MAX_NUMBER_OF_SLICES_PER_REQUEST)
53 raise InvalidArgumentsException(message)
54
55
56 # Register above namespace
57 web_socket.on_namespace(Slices('/slices'))
58
[end of backend/medtagger/api/scans/service_web_socket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/medtagger/api/scans/service_web_socket.py b/backend/medtagger/api/scans/service_web_socket.py
--- a/backend/medtagger/api/scans/service_web_socket.py
+++ b/backend/medtagger/api/scans/service_web_socket.py
@@ -26,10 +26,10 @@
self._raise_on_invalid_request_slices(count, orientation)
orientation = SliceOrientation[orientation]
- slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)
- slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)
- last_in_batch = begin if reversed_order else begin + count - 1
- for index, (_slice, image) in slices_to_send:
+ slices = list(business.get_slices_for_scan(scan_id, begin, count, orientation=orientation))
+ slices_to_send = list(reversed(slices)) if reversed_order else slices
+ last_in_batch = begin if reversed_order else begin + len(slices_to_send) - 1
+ for index, (_slice, image) in enumerate(slices_to_send):
emit('slice', {
'scan_id': scan_id,
'index': begin + index,
| {"golden_diff": "diff --git a/backend/medtagger/api/scans/service_web_socket.py b/backend/medtagger/api/scans/service_web_socket.py\n--- a/backend/medtagger/api/scans/service_web_socket.py\n+++ b/backend/medtagger/api/scans/service_web_socket.py\n@@ -26,10 +26,10 @@\n self._raise_on_invalid_request_slices(count, orientation)\n \n orientation = SliceOrientation[orientation]\n- slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)\n- slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)\n- last_in_batch = begin if reversed_order else begin + count - 1\n- for index, (_slice, image) in slices_to_send:\n+ slices = list(business.get_slices_for_scan(scan_id, begin, count, orientation=orientation))\n+ slices_to_send = list(reversed(slices)) if reversed_order else slices\n+ last_in_batch = begin if reversed_order else begin + len(slices_to_send) - 1\n+ for index, (_slice, image) in enumerate(slices_to_send):\n emit('slice', {\n 'scan_id': scan_id,\n 'index': begin + index,\n", "issue": "Scans with low number of Slices are not properly loaded on Labeling page\n## Current Behavior\r\n\r\nWhen Scan has less than 10 Slices, Scan Viewer keeps displaying spinner indicator and never gets to displaying actual Scan.\r\n\r\n## Expected Behavior\r\n\r\nScan should be displayed properly, no matter how many Slices it has.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Upload less than 10 Slices.\r\n 2. Go to Labelling page.\r\n 3. Voi'la, Scan is not displayed, instead you're presented with spinner indicator that doesn't dissapear.\r\n\r\n## Additional comment (optional)\r\n\r\nReason for that happening is that `SLICE_BATCH_SIZE` (`validation-page.component.ts`, Line 23) is fixed to 10. When we upload Scan that has less than `SLICE_BATCH_SIZE` slices, method `slice.isLastInBatch()` (`marker-page.component.ts`, Line 101) will always return `false`, because it will never hit slice with id = 9 (because there isn't one...). 
Therefore, Slice download will never be marked as completed.\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for definition of Scans service available via WebSockets.\"\"\"\nfrom typing import Dict\n\nfrom flask_socketio import Namespace, emit\n\nfrom medtagger.api import web_socket\nfrom medtagger.database.models import SliceOrientation\nfrom medtagger.types import ScanID\nfrom medtagger.api.exceptions import InvalidArgumentsException\nfrom medtagger.api.scans import business\n\n\nclass Slices(Namespace):\n \"\"\"WebSocket handler for /slices namespace.\"\"\"\n\n MAX_NUMBER_OF_SLICES_PER_REQUEST = 25\n\n def on_request_slices(self, request: Dict) -> None:\n \"\"\"Handle slices request triggered by `request_slices` event.\"\"\"\n assert request.get('scan_id'), 'ScanID is required!'\n scan_id = ScanID(str(request['scan_id']))\n begin = max(0, request.get('begin', 0))\n count = request.get('count', 1)\n reversed_order = request.get('reversed', False)\n orientation = request.get('orientation', SliceOrientation.Z.value)\n self._raise_on_invalid_request_slices(count, orientation)\n\n orientation = SliceOrientation[orientation]\n slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)\n slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)\n last_in_batch = begin if reversed_order else begin + count - 1\n for index, (_slice, image) in slices_to_send:\n emit('slice', {\n 'scan_id': scan_id,\n 'index': begin + index,\n 'last_in_batch': last_in_batch,\n 'image': image,\n })\n\n def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:\n \"\"\"Validate incoming request and raise an exception if there are issues with given arguments.\n\n :param count: number of slices that should be returned\n :param orientation: Slice's orientation as a string\n \"\"\"\n # Make sure that passed orientation is proper one\n if orientation not in SliceOrientation.__members__:\n raise InvalidArgumentsException('Invalid Slice orientation.')\n\n # Make sure that nobody will fetch whole scan at once. It could freeze our backend application.\n if count > self.MAX_NUMBER_OF_SLICES_PER_REQUEST:\n message = 'Cannot return more than {} slices per request.'.format(self.MAX_NUMBER_OF_SLICES_PER_REQUEST)\n raise InvalidArgumentsException(message)\n\n\n# Register above namespace\nweb_socket.on_namespace(Slices('/slices'))\n", "path": "backend/medtagger/api/scans/service_web_socket.py"}]} | 1,420 | 272 |
gh_patches_debug_17992 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/engine/gradient_handler/__init__.py]
1 from ._base_gradient_handler import BaseGradientHandler
2 from ._data_parallel_gradient_handler import DataParallelGradientHandler
3 from ._zero_gradient_handler import ZeROGradientHandler
4 from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler
5 from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler
6 from ._moe_gradient_handler import MoeGradientHandler
7 from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler
8
9 __all__ = [
10 'BaseGradientHandler', 'DataParallelGradientHandler', 'ZeROGradientHandler', 'PipelineSharedModuleGradientHandler',
11 'MoeGradientHandler', 'SequenceParallelGradientHandler'
12 ]
13
[end of colossalai/engine/gradient_handler/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/engine/gradient_handler/__init__.py b/colossalai/engine/gradient_handler/__init__.py
--- a/colossalai/engine/gradient_handler/__init__.py
+++ b/colossalai/engine/gradient_handler/__init__.py
@@ -1,10 +1,9 @@
from ._base_gradient_handler import BaseGradientHandler
from ._data_parallel_gradient_handler import DataParallelGradientHandler
-from ._zero_gradient_handler import ZeROGradientHandler
-from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler
-from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler
from ._moe_gradient_handler import MoeGradientHandler
+from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler
from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler
+from ._zero_gradient_handler import ZeROGradientHandler
__all__ = [
'BaseGradientHandler', 'DataParallelGradientHandler', 'ZeROGradientHandler', 'PipelineSharedModuleGradientHandler',
| {"golden_diff": "diff --git a/colossalai/engine/gradient_handler/__init__.py b/colossalai/engine/gradient_handler/__init__.py\n--- a/colossalai/engine/gradient_handler/__init__.py\n+++ b/colossalai/engine/gradient_handler/__init__.py\n@@ -1,10 +1,9 @@\n from ._base_gradient_handler import BaseGradientHandler\n from ._data_parallel_gradient_handler import DataParallelGradientHandler\n-from ._zero_gradient_handler import ZeROGradientHandler\n-from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler\n-from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler\n from ._moe_gradient_handler import MoeGradientHandler\n+from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler\n from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler\n+from ._zero_gradient_handler import ZeROGradientHandler\n \n __all__ = [\n 'BaseGradientHandler', 'DataParallelGradientHandler', 'ZeROGradientHandler', 'PipelineSharedModuleGradientHandler',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from ._base_gradient_handler import BaseGradientHandler\nfrom ._data_parallel_gradient_handler import DataParallelGradientHandler\nfrom ._zero_gradient_handler import ZeROGradientHandler\nfrom ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler\nfrom ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler\nfrom ._moe_gradient_handler import MoeGradientHandler\nfrom ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler\n\n__all__ = [\n 'BaseGradientHandler', 'DataParallelGradientHandler', 'ZeROGradientHandler', 'PipelineSharedModuleGradientHandler',\n 'MoeGradientHandler', 'SequenceParallelGradientHandler'\n]\n", "path": "colossalai/engine/gradient_handler/__init__.py"}]} | 714 | 210 |
gh_patches_debug_386 | rasdani/github-patches | git_diff | UTNkar__moore-554 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix cookies for Utnarm
Utnarm recently switched to utnarm.se instead of utnarm.utn.se. As a result, you can't sign in to utnarm.se. By default, moore uses utn.se as the cookie domain, and since utnarm.se is a different top-level domain the cookies can't be used.
We need to dynamically add utnarm.se as a cookie domain. This python package might be useful https://github.com/ViktorStiskala/django-shared-session
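As a stop-gap, a sketch of the settings change that at least gives each domain its own working session cookie — dropping the hard-coded parent domain so Django scopes the cookie to the requesting host; truly sharing one login across both domains would still need something like the package above:
```python
# src/moore/settings/production.py (sketch)
CSRF_COOKIE_SECURE = True

# No SESSION_COOKIE_DOMAIN here: Django then limits the session cookie to the
# host that served the request, so both utn.se and utnarm.se can sign in.
SESSION_COOKIE_SECURE = True
```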
</issue>
<code>
[start of src/moore/settings/production.py]
1 """
2 Django settings for the production environment of Project Moore.
3
4 For more information regarding running in production see,
5 See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
6
7 For more information on this file, see
8 https://docs.djangoproject.com/en/1.10/topics/settings/
9
10 For the full list of settings and their values, see
11 https://docs.djangoproject.com/en/1.10/ref/settings/
12 """
13 from __future__ import absolute_import, unicode_literals
14 import raven
15
16 from .base import *
17
18 # SECURITY WARNING: don't run with debug turned on in production!
19 DEBUG = False
20
21 # SECURITY WARNING: keep the secret key used in production secret!
22 SECRET_KEY = os.environ.get(
23 'DJANGO_SECRET',
24 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'
25 )
26
27 # Database
28 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
29
30 DATABASES = {
31 'default': {
32 'ENGINE': 'django.db.backends.postgresql',
33 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),
34 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),
35 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),
36 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),
37 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),
38 }
39 }
40
41
42 # CONN_MAX_AGE = 0
43
44 # Base URL to use when referring to full URLs within the Wagtail admin
45 # backend - e.g. in notification emails. Don't include '/admin' or a
46 # trailing slash
47 BASE_URL = 'https://utn.se'
48
49 ALLOWED_HOSTS = ['.utn.se', '.utnarm.se']
50
51 # Email settings
52 DEFAULT_FROM_EMAIL = '[email protected]'
53
54 EMAIL_SUBJECT_PREFIX = '[UTN] '
55
56 # Sentry Configuration - will be sent error messages
57 RAVEN_CONFIG = {
58 'dsn': os.environ.get('SENTRY_DSN'),
59 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),
60 }
61
62 LOGGING = {
63 'version': 1,
64 'disable_existing_loggers': True,
65 'root': {
66 'level': 'WARNING',
67 'handlers': ['sentry'],
68 },
69 'formatters': {
70 'verbose': {
71 'format': '%(levelname)s %(asctime)s %(module)s '
72 '%(process)d %(thread)d %(message)s'
73 },
74 },
75 'handlers': {
76 'sentry': {
77 'level': 'ERROR',
78 'class': 'raven.contrib.django.raven_compat'
79 '.handlers.SentryHandler',
80 'tags': {'custom-tag': 'x'},
81 },
82 'console': {
83 'level': 'DEBUG',
84 'class': 'logging.StreamHandler',
85 'formatter': 'verbose'
86 }
87 },
88 'loggers': {
89 'django.db.backends': {
90 'level': 'ERROR',
91 'handlers': ['console'],
92 'propagate': False,
93 },
94 'raven': {
95 'level': 'DEBUG',
96 'handlers': ['console'],
97 'propagate': False,
98 },
99 'sentry.errors': {
100 'level': 'DEBUG',
101 'handlers': ['console'],
102 'propagate': False,
103 },
104 },
105 }
106
107 CSRF_COOKIE_SECURE = True
108
109 SESSION_COOKIE_DOMAIN = '.utn.se'
110
111 SESSION_COOKIE_SECURE = True
112
113 MELOS_URL = os.environ.get('MELOS_URL')
114 MELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')
115 MELOS_ADMIN = os.environ.get('MELOS_ADMIN')
116
117 # Google API
118 GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
119
120 RECAPTCHA_PUBLIC_KEY = os.environ.get("RECAPTCHA_PUBLIC_KEY", "")
121 RECAPTCHA_PRIVATE_KEY = os.environ.get("RECAPTCHA_PRIVATE_KEY", "")
122
123 try:
124 from .local import *
125 except ImportError:
126 pass
127
[end of src/moore/settings/production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/moore/settings/production.py b/src/moore/settings/production.py
--- a/src/moore/settings/production.py
+++ b/src/moore/settings/production.py
@@ -106,8 +106,6 @@
CSRF_COOKIE_SECURE = True
-SESSION_COOKIE_DOMAIN = '.utn.se'
-
SESSION_COOKIE_SECURE = True
MELOS_URL = os.environ.get('MELOS_URL')
| {"golden_diff": "diff --git a/src/moore/settings/production.py b/src/moore/settings/production.py\n--- a/src/moore/settings/production.py\n+++ b/src/moore/settings/production.py\n@@ -106,8 +106,6 @@\n \n CSRF_COOKIE_SECURE = True\n \n-SESSION_COOKIE_DOMAIN = '.utn.se'\n-\n SESSION_COOKIE_SECURE = True\n \n MELOS_URL = os.environ.get('MELOS_URL')\n", "issue": "Fix cookies for Utnarm\nUtnarm recently switched to utnarm.se instead of utnarm.utn.se. This lead to that you can\u2019t sign in to utnarm.se. Per default, moore uses utn.se as cookie domain and since utnarm.se is a different top level domain the cookies can\u2019t be used. \r\n\r\nWe need to dynamically add utnarm.se as a cookie domain. This python package might be useful https://github.com/ViktorStiskala/django-shared-session\n", "before_files": [{"content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport raven\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. 
Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://utn.se'\n\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Sentry Configuration - will be sent error messages\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat'\n '.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_DOMAIN = '.utn.se'\n\nSESSION_COOKIE_SECURE = True\n\nMELOS_URL = os.environ.get('MELOS_URL')\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\n\n# Google API\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\n\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\"RECAPTCHA_PUBLIC_KEY\", \"\")\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\"RECAPTCHA_PRIVATE_KEY\", \"\")\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "src/moore/settings/production.py"}]} | 1,812 | 95 |
gh_patches_debug_116 | rasdani/github-patches | git_diff | sanic-org__sanic-1530 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Publish 19.3 release to PyPI
Thank you for the release 3 days ago!
https://github.com/huge-success/sanic/releases/tag/19.3
It's missing from PyPI at the moment:
https://pypi.org/project/sanic/#history
Please publish it at your convenience 🙇
Keep up the awesome work ❤️
</issue>
<code>
[start of sanic/__init__.py]
1 from sanic.app import Sanic
2 from sanic.blueprints import Blueprint
3
4
5 __version__ = "19.03.0"
6
7 __all__ = ["Sanic", "Blueprint"]
8
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -2,6 +2,6 @@
from sanic.blueprints import Blueprint
-__version__ = "19.03.0"
+__version__ = "19.03.1"
__all__ = ["Sanic", "Blueprint"]
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -2,6 +2,6 @@\n from sanic.blueprints import Blueprint\n \n \n-__version__ = \"19.03.0\"\n+__version__ = \"19.03.1\"\n \n __all__ = [\"Sanic\", \"Blueprint\"]\n", "issue": "Publish 19.3 release to PyPI\nThank you for the release 3 days ago!\r\n\r\nhttps://github.com/huge-success/sanic/releases/tag/19.3\r\n\r\nIt's missing from PyPI at the moment:\r\n\r\nhttps://pypi.org/project/sanic/#history\r\n\r\nPlease publish it at your convenience \ud83d\ude47 \r\n\r\nKeep up the awesome work \u2764\ufe0f \n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n\n__version__ = \"19.03.0\"\n\n__all__ = [\"Sanic\", \"Blueprint\"]\n", "path": "sanic/__init__.py"}]} | 667 | 96 |
gh_patches_debug_34427 | rasdani/github-patches | git_diff | mozilla__pontoon-2520 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an option to link to multiple strings
*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*
## [Bug 1324933](https://bugzilla.mozilla.org/show_bug.cgi?id=1324933)
Bug Reporter: @tomer
CC: @gaby2300, @ItielMaN, @mathjazz
Blocker for: [Bug 1390931](https://bugzilla.mozilla.org/show_bug.cgi?id=1390931)
In dxr.mozilla.org I can select multiple lines in a file and then have a link that will show these lines highlighted. I'd suggest having a similar feature in pontoon, so we could send a team member a link that contains multiple strings, instead of one of the following workarounds:
* Sending one string per link
* Sending a link to a search result or a resource, and asking them to look at the second string, the third from the bottom, etc.
I'd like to have a link similar to this one:
https://pontoon.mozilla.org/he/firefox-aurora/all-resources/?string=75295,75296
… which will show me both strings in the list.
As for the UI for creating such lists, I guess having control-click to toggle multiple selections will be enough; this is an advanced feature after all. ☺
</issue>
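For a concrete picture of the requested behaviour, the sketch below builds a single translate-view URL whose query string carries several entity IDs at once. It reuses the `reverse()` call from the management command shown below; the helper name and the `list` query parameter are illustrative assumptions rather than an existing Pontoon API.

```python
from django.urls import reverse

def build_multi_string_link(locale_code, project_slug, entity_pks):
    """Hypothetical helper: one URL that points at several strings."""
    url = reverse(
        "pontoon.translate",
        kwargs={
            "locale": locale_code,
            "project": project_slug,
            "resource": "all-resources",
        },
    )
    # e.g. /he/firefox-aurora/all-resources/?list=75295,75296
    return url + "?list=" + ",".join(str(pk) for pk in entity_pks)
```

Control-click selection in the UI would then only need to collect the IDs passed in as `entity_pks`.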
<code>
[start of pontoon/projects/management/commands/send_review_notifications.py]
1 from collections import defaultdict
2 from datetime import timedelta
3 from urllib.parse import urlencode
4
5 from django.core.management.base import BaseCommand
6 from django.db.models import Q
7 from django.urls import reverse
8 from django.utils import timezone
9 from notifications.signals import notify
10 from pontoon.base.models import Translation
11
12
13 class Command(BaseCommand):
14 help = "Notify translators about their newly reviewed suggestions"
15
16 def get_description(self, author, notifyData):
17 desc = "Your suggestions have been reviewed:\n<ul>"
18
19 for (locale, project), (approved, rejected) in notifyData.items():
20 url = reverse(
21 "pontoon.translate",
22 kwargs={
23 "locale": locale.code,
24 "project": project.slug,
25 "resource": "all-resources",
26 },
27 )
28 url += "?" + urlencode({"author": author.email})
29 if len(approved) == 1 and len(rejected) == 0:
30 url += "&" + urlencode({"string": approved[0]})
31 elif len(approved) == 0 and len(rejected) == 1:
32 url += "&" + urlencode({"string": rejected[0]})
33
34 # Filter out rejections where the author's own suggestion replaced the previous
35 rejected = [x for x in rejected if x not in approved]
36
37 if len(approved) == 0:
38 msg = f"{len(rejected)} Rejected"
39 else:
40 msg = f"{len(approved)} Approved"
41 if len(rejected) > 0:
42 msg += f", {len(rejected)} Rejected"
43
44 desc += (
45 f'\n<li><a href="{url}">{project.name} ({locale.code})</a>: {msg}</li>'
46 )
47
48 return desc + "\n</ul>"
49
50 def handle(self, *args, **options):
51 """
52 This command sends notifications about newly reviewed
53 suggestions to the authors of those suggestions.
54
55 The command is designed to run on a daily basis.
56 """
57 self.stdout.write("Sending review notifications...")
58
59 # (author) -> (locale, project) -> (approved, rejected)
60 data = defaultdict(lambda: defaultdict(lambda: (list(), list())))
61 start = timezone.now() - timedelta(days=1)
62 for suggestion in Translation.objects.filter(
63 (Q(approved_date__gt=start) | Q(rejected_date__gt=start))
64 & Q(user__profile__review_notifications=True)
65 ):
66 author = suggestion.user
67 locale = suggestion.locale
68 project = suggestion.entity.resource.project
69
70 if suggestion.approved and suggestion.approved_user != author:
71 data[author][(locale, project)][0].append(suggestion.entity.pk)
72 elif suggestion.rejected and suggestion.rejected_user != author:
73 data[author][(locale, project)][1].append(suggestion.entity.pk)
74
75 for author, notifyData in data.items():
76 desc = self.get_description(author, notifyData)
77 notify.send(
78 sender=author,
79 recipient=author,
80 verb="has reviewed suggestions",
81 description=desc,
82 )
83
84 self.stdout.write(f"Sent {len(data)} review notifications.")
85
[end of pontoon/projects/management/commands/send_review_notifications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/projects/management/commands/send_review_notifications.py b/pontoon/projects/management/commands/send_review_notifications.py
--- a/pontoon/projects/management/commands/send_review_notifications.py
+++ b/pontoon/projects/management/commands/send_review_notifications.py
@@ -1,6 +1,5 @@
from collections import defaultdict
from datetime import timedelta
-from urllib.parse import urlencode
from django.core.management.base import BaseCommand
from django.db.models import Q
@@ -13,7 +12,7 @@
class Command(BaseCommand):
help = "Notify translators about their newly reviewed suggestions"
- def get_description(self, author, notifyData):
+ def get_description(self, notifyData):
desc = "Your suggestions have been reviewed:\n<ul>"
for (locale, project), (approved, rejected) in notifyData.items():
@@ -25,11 +24,8 @@
"resource": "all-resources",
},
)
- url += "?" + urlencode({"author": author.email})
- if len(approved) == 1 and len(rejected) == 0:
- url += "&" + urlencode({"string": approved[0]})
- elif len(approved) == 0 and len(rejected) == 1:
- url += "&" + urlencode({"string": rejected[0]})
+ list = map(str, approved + rejected)
+ url += "?list=" + ",".join(list)
# Filter out rejections where the author's own suggestion replaced the previous
rejected = [x for x in rejected if x not in approved]
@@ -73,7 +69,7 @@
data[author][(locale, project)][1].append(suggestion.entity.pk)
for author, notifyData in data.items():
- desc = self.get_description(author, notifyData)
+ desc = self.get_description(notifyData)
notify.send(
sender=author,
recipient=author,
| {"golden_diff": "diff --git a/pontoon/projects/management/commands/send_review_notifications.py b/pontoon/projects/management/commands/send_review_notifications.py\n--- a/pontoon/projects/management/commands/send_review_notifications.py\n+++ b/pontoon/projects/management/commands/send_review_notifications.py\n@@ -1,6 +1,5 @@\n from collections import defaultdict\n from datetime import timedelta\n-from urllib.parse import urlencode\n \n from django.core.management.base import BaseCommand\n from django.db.models import Q\n@@ -13,7 +12,7 @@\n class Command(BaseCommand):\n help = \"Notify translators about their newly reviewed suggestions\"\n \n- def get_description(self, author, notifyData):\n+ def get_description(self, notifyData):\n desc = \"Your suggestions have been reviewed:\\n<ul>\"\n \n for (locale, project), (approved, rejected) in notifyData.items():\n@@ -25,11 +24,8 @@\n \"resource\": \"all-resources\",\n },\n )\n- url += \"?\" + urlencode({\"author\": author.email})\n- if len(approved) == 1 and len(rejected) == 0:\n- url += \"&\" + urlencode({\"string\": approved[0]})\n- elif len(approved) == 0 and len(rejected) == 1:\n- url += \"&\" + urlencode({\"string\": rejected[0]})\n+ list = map(str, approved + rejected)\n+ url += \"?list=\" + \",\".join(list)\n \n # Filter out rejections where the author's own suggestion replaced the previous\n rejected = [x for x in rejected if x not in approved]\n@@ -73,7 +69,7 @@\n data[author][(locale, project)][1].append(suggestion.entity.pk)\n \n for author, notifyData in data.items():\n- desc = self.get_description(author, notifyData)\n+ desc = self.get_description(notifyData)\n notify.send(\n sender=author,\n recipient=author,\n", "issue": "Add an option to link to multiple strings\n*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*\n## [Bug 1324933](https://bugzilla.mozilla.org/show_bug.cgi?id=1324933)\nBug Reporter: @tomer\nCC: @gaby2300, @ItielMaN, @mathjazz\nBlocker for: [Bug 1390931](https://bugzilla.mozilla.org/show_bug.cgi?id=1390931)\n\nIn dxr.mozilla.org I can select multiple lines in a file and than have a link that will show these lines highlighted. I'd suggest having a similar feature in pontoon, so we could send a team member a link that contains multiple strings, instead of one of the following workarounds:\n\n* Sending one string per link\n* Sending link to a search results or a resource, and asking them to look on the second string, the third from the bottom, etc. \n\nI'd like to have a link similar to this one: \nhttps://pontoon.mozilla.org/he/firefox-aurora/all-resources/?string=75295,75296 \n\u2026 Which will show me both strings in the list.\n\n\nAs for the UI for creating such lists, I guess having control-click to toggle multiple selections will be enough; this is an advanced feature after all. 
\u263a\n", "before_files": [{"content": "from collections import defaultdict\nfrom datetime import timedelta\nfrom urllib.parse import urlencode\n\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom notifications.signals import notify\nfrom pontoon.base.models import Translation\n\n\nclass Command(BaseCommand):\n help = \"Notify translators about their newly reviewed suggestions\"\n\n def get_description(self, author, notifyData):\n desc = \"Your suggestions have been reviewed:\\n<ul>\"\n\n for (locale, project), (approved, rejected) in notifyData.items():\n url = reverse(\n \"pontoon.translate\",\n kwargs={\n \"locale\": locale.code,\n \"project\": project.slug,\n \"resource\": \"all-resources\",\n },\n )\n url += \"?\" + urlencode({\"author\": author.email})\n if len(approved) == 1 and len(rejected) == 0:\n url += \"&\" + urlencode({\"string\": approved[0]})\n elif len(approved) == 0 and len(rejected) == 1:\n url += \"&\" + urlencode({\"string\": rejected[0]})\n\n # Filter out rejections where the author's own suggestion replaced the previous\n rejected = [x for x in rejected if x not in approved]\n\n if len(approved) == 0:\n msg = f\"{len(rejected)} Rejected\"\n else:\n msg = f\"{len(approved)} Approved\"\n if len(rejected) > 0:\n msg += f\", {len(rejected)} Rejected\"\n\n desc += (\n f'\\n<li><a href=\"{url}\">{project.name} ({locale.code})</a>: {msg}</li>'\n )\n\n return desc + \"\\n</ul>\"\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends notifications about newly reviewed\n suggestions to the authors of those suggestions.\n\n The command is designed to run on a daily basis.\n \"\"\"\n self.stdout.write(\"Sending review notifications...\")\n\n # (author) -> (locale, project) -> (approved, rejected)\n data = defaultdict(lambda: defaultdict(lambda: (list(), list())))\n start = timezone.now() - timedelta(days=1)\n for suggestion in Translation.objects.filter(\n (Q(approved_date__gt=start) | Q(rejected_date__gt=start))\n & Q(user__profile__review_notifications=True)\n ):\n author = suggestion.user\n locale = suggestion.locale\n project = suggestion.entity.resource.project\n\n if suggestion.approved and suggestion.approved_user != author:\n data[author][(locale, project)][0].append(suggestion.entity.pk)\n elif suggestion.rejected and suggestion.rejected_user != author:\n data[author][(locale, project)][1].append(suggestion.entity.pk)\n\n for author, notifyData in data.items():\n desc = self.get_description(author, notifyData)\n notify.send(\n sender=author,\n recipient=author,\n verb=\"has reviewed suggestions\",\n description=desc,\n )\n\n self.stdout.write(f\"Sent {len(data)} review notifications.\")\n", "path": "pontoon/projects/management/commands/send_review_notifications.py"}]} | 1,690 | 427 |
gh_patches_debug_27260 | rasdani/github-patches | git_diff | ContinualAI__avalanche-52 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LWF "warmup_train" fun never used
Hi @AntonioCarta, I've noticed that this function in the `LearningWithoutForgetting` class is never used; do we need it?
</issue>
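The method in question only trains the new classification head for a few epochs. If it is meant to stay, one plausible wiring (a sketch of a possible direction, not necessarily the intended design) is to call it from a hook that runs before the main training loop of each task; the `before_train()` hook below is assumed to be invoked by the `Strategy` base class, which is not shown in the file.

```python
# Continuing the class defined in lwf.py below.
class LearningWithoutForgetting(Strategy):
    ...

    def before_train(self):
        # Assumed hook: warm up the freshly added head before the
        # full fine-tuning of each task starts.
        if self.warmup_epochs > 0:
            self.warmup_train()
```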
<code>
[start of avalanche/training/strategies/lwf/lwf.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 ################################################################################
5 # Copyright (c) 2020 ContinualAI Research #
6 # Copyrights licensed under the CC BY 4.0 License. #
7 # See the accompanying LICENSE file for terms. #
8 # #
9 # Date: 1-05-2020 #
10 # Author(s): ContinualAI #
11 # E-mail: [email protected] #
12 # Website: clair.continualai.org #
13 ################################################################################
14
15 """ Rehearsal Strategy Implementation """
16
17 # Python 2-3 compatible
18 from __future__ import print_function
19 from __future__ import division
20 from __future__ import absolute_import
21
22 from training.strategies.strategy import Strategy
23 from avalanche.evaluation.eval_protocol import EvalProtocol
24 from avalanche.evaluation.metrics import ACC
25 from avalanche.training.utils import pad_data, shuffle_in_unison
26 import torch
27 import torch.nn.functional as F
28 import numpy as np
29 import copy
30
31
32 def distillation_loss(y_pred, y_teacher, temperature):
33 """ Distillation loss. """
34 scale = y_teacher.shape[-1] # kl_div is normalized by element instead of observation
35 log_p = F.log_softmax(y_pred / temperature, dim=1)
36 q = F.softmax(y_teacher / temperature, dim=1)
37 res = scale * F.kl_div(log_p, q, reduction='mean')
38 return res
39
40
41 class LearningWithoutForgetting(Strategy):
42 def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=2, optimizer=None,
43 criterion=torch.nn.CrossEntropyLoss(), mb_size=256,
44 train_ep=2, device=None, preproc=None,
45 eval_protocol=EvalProtocol(metrics=[ACC()])):
46 """
47 Learning without Forgetting Strategy.
48
49 paper: https://arxiv.org/abs/1606.09282
50 original implementation (Matlab): https://github.com/lizhitwo/LearningWithoutForgetting
51 reference implementation (pytorch): https://github.com/arunmallya/packnet/blob/master/src/lwf.py
52
53 Args:
54 classes_per_task:
55 alpha: distillation loss coefficient. Can be an integer or a list of values (one for each task).
56 distillation_loss_T: distillation loss temperature
57 warmup_epochs: number of warmup epochs training only the new parameters.
58 """
59 super(LearningWithoutForgetting, self).__init__(
60 model, optimizer, criterion, mb_size, train_ep, multi_head=False,
61 device=device, preproc=preproc, eval_protocol=eval_protocol
62 )
63
64 # LwF parameters
65 self.classes_per_task = classes_per_task
66 self.prev_model = None
67 self.distillation_loss_T = distillation_loss_T
68 self.alpha = alpha
69 self.warmup_epochs = warmup_epochs
70
71 def warmup_train(self):
72 """ Train only the new parameters for the first epochs. """
73 # add only the last layer to the trainable parameters
74 opt = torch.optim.SGD(lr=0.01, params=self.model.classifier.parameters())
75
76 train_x, train_y, it_x_ep = self.preproc_batch_data(self.x, self.y, self.t)
77 model = self.model.to(self.device)
78
79 train_x = torch.tensor(train_x, dtype=torch.float)
80 train_y = torch.tensor(train_y, dtype=torch.long)
81 for ep in range(self.train_ep):
82 for it in range(it_x_ep):
83 start = it * self.mb_size
84 end = (it + 1) * self.mb_size
85
86 self.optimizer.zero_grad()
87 x_mb = train_x[start:end].to(self.device)
88 y_mb = train_y[start:end].to(self.device)
89 logits = model(x_mb)
90 # loss computed only on the new classes
91 loss = self.criterion(logits[:, self.t*self.classes_per_task:(self.t+1)*self.classes_per_task],
92 y_mb - self.t*self.classes_per_task)
93 loss.backward()
94 opt.step()
95
96 def compute_loss(self, logits, y_mb):
97 dist_loss = 0
98 if self.prev_model is not None:
99 y_prev = self.prev_model(self.x_mb).detach()
100 loss = self.criterion(logits, y_mb)
101 dist_loss += distillation_loss(logits, y_prev, self.distillation_loss_T)
102
103 if isinstance(self.alpha, list):
104 loss = loss + self.alpha[self.t] * dist_loss
105 else:
106 loss = loss + self.alpha * dist_loss
107 else:
108 loss = self.criterion(logits, y_mb)
109 return loss
110
111 def after_train(self):
112 self.prev_model = copy.deepcopy(self.model)
113
114
[end of avalanche/training/strategies/lwf/lwf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/avalanche/training/strategies/lwf/lwf.py b/avalanche/training/strategies/lwf/lwf.py
--- a/avalanche/training/strategies/lwf/lwf.py
+++ b/avalanche/training/strategies/lwf/lwf.py
@@ -39,7 +39,7 @@
class LearningWithoutForgetting(Strategy):
- def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=2, optimizer=None,
+ def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=0, optimizer=None,
criterion=torch.nn.CrossEntropyLoss(), mb_size=256,
train_ep=2, device=None, preproc=None,
eval_protocol=EvalProtocol(metrics=[ACC()])):
@@ -78,7 +78,7 @@
train_x = torch.tensor(train_x, dtype=torch.float)
train_y = torch.tensor(train_y, dtype=torch.long)
- for ep in range(self.train_ep):
+ for ep in range(self.warmup_epochs):
for it in range(it_x_ep):
start = it * self.mb_size
end = (it + 1) * self.mb_size
@@ -108,6 +108,9 @@
loss = self.criterion(logits, y_mb)
return loss
+ def before_train(self):
+ self.warmup_train()
+
def after_train(self):
self.prev_model = copy.deepcopy(self.model)
| {"golden_diff": "diff --git a/avalanche/training/strategies/lwf/lwf.py b/avalanche/training/strategies/lwf/lwf.py\n--- a/avalanche/training/strategies/lwf/lwf.py\n+++ b/avalanche/training/strategies/lwf/lwf.py\n@@ -39,7 +39,7 @@\n \n \n class LearningWithoutForgetting(Strategy):\n- def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=2, optimizer=None,\n+ def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=0, optimizer=None,\n criterion=torch.nn.CrossEntropyLoss(), mb_size=256,\n train_ep=2, device=None, preproc=None,\n eval_protocol=EvalProtocol(metrics=[ACC()])):\n@@ -78,7 +78,7 @@\n \n train_x = torch.tensor(train_x, dtype=torch.float)\n train_y = torch.tensor(train_y, dtype=torch.long)\n- for ep in range(self.train_ep):\n+ for ep in range(self.warmup_epochs):\n for it in range(it_x_ep):\n start = it * self.mb_size\n end = (it + 1) * self.mb_size\n@@ -108,6 +108,9 @@\n loss = self.criterion(logits, y_mb)\n return loss\n \n+ def before_train(self):\n+ self.warmup_train()\n+\n def after_train(self):\n self.prev_model = copy.deepcopy(self.model)\n", "issue": "LWF \"warmup_train\" fun never used\nHi @AntonioCarta, I've noticed this function in the `LearningWithoutForgetting` class is never used, do we need it?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n################################################################################\n# Copyright (c) 2020 ContinualAI Research #\n# Copyrights licensed under the CC BY 4.0 License. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 1-05-2020 #\n# Author(s): ContinualAI #\n# E-mail: [email protected] #\n# Website: clair.continualai.org #\n################################################################################\n\n\"\"\" Rehearsal Strategy Implementation \"\"\"\n\n# Python 2-3 compatible\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom training.strategies.strategy import Strategy\nfrom avalanche.evaluation.eval_protocol import EvalProtocol\nfrom avalanche.evaluation.metrics import ACC\nfrom avalanche.training.utils import pad_data, shuffle_in_unison\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport copy\n\n\ndef distillation_loss(y_pred, y_teacher, temperature):\n \"\"\" Distillation loss. \"\"\"\n scale = y_teacher.shape[-1] # kl_div is normalized by element instead of observation\n log_p = F.log_softmax(y_pred / temperature, dim=1)\n q = F.softmax(y_teacher / temperature, dim=1)\n res = scale * F.kl_div(log_p, q, reduction='mean')\n return res\n\n\nclass LearningWithoutForgetting(Strategy):\n def __init__(self, model, classes_per_task, alpha=0.5, distillation_loss_T=2, warmup_epochs=2, optimizer=None,\n criterion=torch.nn.CrossEntropyLoss(), mb_size=256,\n train_ep=2, device=None, preproc=None,\n eval_protocol=EvalProtocol(metrics=[ACC()])):\n \"\"\"\n Learning without Forgetting Strategy.\n\n paper: https://arxiv.org/abs/1606.09282\n original implementation (Matlab): https://github.com/lizhitwo/LearningWithoutForgetting\n reference implementation (pytorch): https://github.com/arunmallya/packnet/blob/master/src/lwf.py\n\n Args:\n classes_per_task:\n alpha: distillation loss coefficient. 
Can be an integer or a list of values (one for each task).\n distillation_loss_T: distillation loss temperature\n warmup_epochs: number of warmup epochs training only the new parameters.\n \"\"\"\n super(LearningWithoutForgetting, self).__init__(\n model, optimizer, criterion, mb_size, train_ep, multi_head=False,\n device=device, preproc=preproc, eval_protocol=eval_protocol\n )\n\n # LwF parameters\n self.classes_per_task = classes_per_task\n self.prev_model = None\n self.distillation_loss_T = distillation_loss_T\n self.alpha = alpha\n self.warmup_epochs = warmup_epochs\n\n def warmup_train(self):\n \"\"\" Train only the new parameters for the first epochs. \"\"\"\n # add only the last layer to the trainable parameters\n opt = torch.optim.SGD(lr=0.01, params=self.model.classifier.parameters())\n\n train_x, train_y, it_x_ep = self.preproc_batch_data(self.x, self.y, self.t)\n model = self.model.to(self.device)\n\n train_x = torch.tensor(train_x, dtype=torch.float)\n train_y = torch.tensor(train_y, dtype=torch.long)\n for ep in range(self.train_ep):\n for it in range(it_x_ep):\n start = it * self.mb_size\n end = (it + 1) * self.mb_size\n\n self.optimizer.zero_grad()\n x_mb = train_x[start:end].to(self.device)\n y_mb = train_y[start:end].to(self.device)\n logits = model(x_mb)\n # loss computed only on the new classes\n loss = self.criterion(logits[:, self.t*self.classes_per_task:(self.t+1)*self.classes_per_task],\n y_mb - self.t*self.classes_per_task)\n loss.backward()\n opt.step()\n\n def compute_loss(self, logits, y_mb):\n dist_loss = 0\n if self.prev_model is not None:\n y_prev = self.prev_model(self.x_mb).detach()\n loss = self.criterion(logits, y_mb)\n dist_loss += distillation_loss(logits, y_prev, self.distillation_loss_T)\n\n if isinstance(self.alpha, list):\n loss = loss + self.alpha[self.t] * dist_loss\n else:\n loss = loss + self.alpha * dist_loss\n else:\n loss = self.criterion(logits, y_mb)\n return loss\n\n def after_train(self):\n self.prev_model = copy.deepcopy(self.model)\n\n", "path": "avalanche/training/strategies/lwf/lwf.py"}]} | 1,849 | 351 |
gh_patches_debug_8384 | rasdani/github-patches | git_diff | Qiskit__qiskit-2350 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
disassemble_circuits() suggested in qobj_to_circuits.py DeprecationWarning doesn't exist
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**: 0.8.0
- **Python version**: 3.7.2
- **Operating system**: macOS
`qobj_to_circuits` gives the following `DeprecationWarning`:
```python
.../qiskit/converters/qobj_to_circuits.py:34: DeprecationWarning: qiskit.converters.qobj_to_circuit() is deprecated and will be removed in Qiskit Terra 0.9. Please use qiskit.compiler.disassemble_circuits() to convert a qobj to list of circuits.
```
but `qiskit.compiler.disassemble_circuits()` doesn't exist.
### What is the current behavior?
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: cannot import name 'disassemble_circuits' from 'qiskit.compiler' (/Users/matteo/Work/projects/ibmq/env/lib/python3.7/site-packages/qiskit/compiler/__init__.py)
```
### Steps to reproduce the problem
1. Installed qiskit in a new python virtualenv with `pip install qiskit`
2. `from qiskit.compiler import disassemble_circuits`
```
>>> qiskit.__qiskit_version__
{'qiskit': '0.10.0', 'qiskit-terra': '0.8.0', 'qiskit-ignis': '0.1.1', 'qiskit-aer': '0.2.0', 'qiskit-ibmq-provider': '0.2.1', 'qiskit-aqua': '0.5.0'}
```
### What is the expected behavior?
If a function is deprecated and the warning suggests using a new function, that function should exist in the current release.
### Suggested solutions
Implement the function or change the deprecation warning.
</issue>
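Note that the module shown below already imports `disassemble` from `qiskit.assembler`, so the warning could simply point there. A minimal sketch of the working call, with the return shape inferred from how `qobj_to_circuits` itself uses it:

```python
from qiskit.assembler import disassemble

def qobj_to_circuit_list(qobj):
    # disassemble() returns a tuple whose first element is the list of
    # QuantumCircuit objects (see qobj_to_circuits below, which returns
    # variables[0]).
    return disassemble(qobj)[0]
```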
<code>
[start of qiskit/converters/qobj_to_circuits.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2018.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Helper function for converting qobj to a list of circuits"""
16
17 import warnings
18
19 from qiskit.assembler import disassemble
20
21
22 def qobj_to_circuits(qobj):
23 """Return a list of QuantumCircuit object(s) from a qobj
24
25 Args:
26 qobj (Qobj): The Qobj object to convert to QuantumCircuits
27 Returns:
28 list: A list of QuantumCircuit objects from the qobj
29
30 """
31 warnings.warn('qiskit.converters.qobj_to_circuit() is deprecated and will '
32 'be removed in Qiskit Terra 0.9. Please use '
33 'qiskit.compiler.disassemble_circuits() to convert a qobj '
34 'to list of circuits.', DeprecationWarning)
35
36 variables = disassemble(qobj)
37 return variables[0]
38
[end of qiskit/converters/qobj_to_circuits.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/converters/qobj_to_circuits.py b/qiskit/converters/qobj_to_circuits.py
--- a/qiskit/converters/qobj_to_circuits.py
+++ b/qiskit/converters/qobj_to_circuits.py
@@ -30,7 +30,7 @@
"""
warnings.warn('qiskit.converters.qobj_to_circuit() is deprecated and will '
'be removed in Qiskit Terra 0.9. Please use '
- 'qiskit.compiler.disassemble_circuits() to convert a qobj '
+ 'qiskit.assembler.disassemble() to convert a qobj '
'to list of circuits.', DeprecationWarning)
variables = disassemble(qobj)
| {"golden_diff": "diff --git a/qiskit/converters/qobj_to_circuits.py b/qiskit/converters/qobj_to_circuits.py\n--- a/qiskit/converters/qobj_to_circuits.py\n+++ b/qiskit/converters/qobj_to_circuits.py\n@@ -30,7 +30,7 @@\n \"\"\"\n warnings.warn('qiskit.converters.qobj_to_circuit() is deprecated and will '\n 'be removed in Qiskit Terra 0.9. Please use '\n- 'qiskit.compiler.disassemble_circuits() to convert a qobj '\n+ 'qiskit.assembler.disassemble() to convert a qobj '\n 'to list of circuits.', DeprecationWarning)\n \n variables = disassemble(qobj)\n", "issue": "disassemble_circuits() suggested in qobj_to_circuits.py DeprecationWarning doesn't exist\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: 0.8.0\r\n- **Python version**: 3.7.2\r\n- **Operating system**: macOS\r\n\r\n`qobj_to_circuits` gives the following `DeprecationWarning`:\r\n\r\n```python\r\n.../qiskit/converters/qobj_to_circuits.py:34: DeprecationWarning: qiskit.converters.qobj_to_circuit() is deprecated and will be removed in Qiskit Terra 0.9. Please use qiskit.compiler.disassemble_circuits() to convert a qobj to list of circuits.\r\n```\r\n\r\nbut `qiskit.compiler.disassemble_circuits()` doesn't exist.\r\n\r\n### What is the current behavior?\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nImportError: cannot import name 'disassemble_circuits' from 'qiskit.compiler' (/Users/matteo/Work/projects/ibmq/env/lib/python3.7/site-packages/qiskit/compiler/__init__.py)\r\n```\r\n\r\n### Steps to reproduce the problem\r\n1. Installed qiskit in a new python virtualenv with `pip install qiskit`\r\n2. `from qiskit.compiler import disassemble_circuits`\r\n\r\n```\r\n>>> qiskit.__qiskit_version__\r\n{'qiskit': '0.10.0', 'qiskit-terra': '0.8.0', 'qiskit-ignis': '0.1.1', 'qiskit-aer': '0.2.0', 'qiskit-ibmq-provider': '0.2.1', 'qiskit-aqua': '0.5.0'}\r\n```\r\n\r\n### What is the expected behavior?\r\nIf a function is deprecated, and the warning suggests to use a new function, this function should exist in the current release.\r\n\r\n### Suggested solutions\r\n\r\nImplement the function or change the deprecation warning.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting qobj to a list of circuits\"\"\"\n\nimport warnings\n\nfrom qiskit.assembler import disassemble\n\n\ndef qobj_to_circuits(qobj):\n \"\"\"Return a list of QuantumCircuit object(s) from a qobj\n\n Args:\n qobj (Qobj): The Qobj object to convert to QuantumCircuits\n Returns:\n list: A list of QuantumCircuit objects from the qobj\n\n \"\"\"\n warnings.warn('qiskit.converters.qobj_to_circuit() is deprecated and will '\n 'be removed in Qiskit Terra 0.9. 
Please use '\n 'qiskit.compiler.disassemble_circuits() to convert a qobj '\n 'to list of circuits.', DeprecationWarning)\n\n variables = disassemble(qobj)\n return variables[0]\n", "path": "qiskit/converters/qobj_to_circuits.py"}]} | 1,413 | 173 |
gh_patches_debug_14386 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mycroft doesn't start if /etc/mycroft/mycroft.conf exists and has comments
In log.py there are these statements:
if isfile(SYSTEM_CONFIG):
with open(SYSTEM_CONFIG) as f:
config = json.load(f)
log_level = config.get("log_level", "DEBUG")
Note that no comment filtering is done here using `load_commented_json`.

Note: no pull request will be submitted, because the best solution may be not to load any config here at all, but to use the common configuration procedure instead.
</issue>
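A minimal sketch of the direction hinted at above, swapping `json.load` for the project's comment-stripping loader; the import path is assumed to be `mycroft.util.json_helper`, as suggested by the helper name in the report.

```python
from os.path import isfile

from mycroft.util.json_helper import load_commented_json  # assumed location

SYSTEM_CONFIG = '/etc/mycroft/mycroft.conf'

log_level = "DEBUG"
if isfile(SYSTEM_CONFIG):
    # Strips the comments out of the file before parsing it as JSON.
    config = load_commented_json(SYSTEM_CONFIG)
    log_level = config.get("log_level", "DEBUG")
```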
<code>
[start of mycroft/util/log.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17 import json
18 import logging
19
20 from os.path import isfile
21
22 SYSTEM_CONFIG = '/etc/mycroft/mycroft.conf'
23
24 __author__ = 'seanfitz'
25
26 log_level = "DEBUG"
27
28 if isfile(SYSTEM_CONFIG):
29 with open(SYSTEM_CONFIG) as f:
30 config = json.load(f)
31 log_level = config.get("log_level", "DEBUG")
32
33 FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
34 logging.basicConfig(format=FORMAT, level=logging.getLevelName(log_level))
35 logger = logging.getLogger("MYCROFT")
36
37
38 def getLogger(name="MYCROFT"):
39 """
40 Get a python logger
41
42 :param name: Module name for the logger
43
44 :return: an instance of logging.Logger
45 """
46 return logging.getLogger(name)
47
[end of mycroft/util/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/util/log.py b/mycroft/util/log.py
--- a/mycroft/util/log.py
+++ b/mycroft/util/log.py
@@ -18,6 +18,7 @@
import logging
from os.path import isfile
+from mycroft.util.json_helper import load_commented_json
SYSTEM_CONFIG = '/etc/mycroft/mycroft.conf'
@@ -26,9 +27,8 @@
log_level = "DEBUG"
if isfile(SYSTEM_CONFIG):
- with open(SYSTEM_CONFIG) as f:
- config = json.load(f)
- log_level = config.get("log_level", "DEBUG")
+ config = load_commented_json(SYSTEM_CONFIG)
+ log_level = config.get("log_level", "DEBUG")
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.getLevelName(log_level))
| {"golden_diff": "diff --git a/mycroft/util/log.py b/mycroft/util/log.py\n--- a/mycroft/util/log.py\n+++ b/mycroft/util/log.py\n@@ -18,6 +18,7 @@\n import logging\n \n from os.path import isfile\n+from mycroft.util.json_helper import load_commented_json\n \n SYSTEM_CONFIG = '/etc/mycroft/mycroft.conf'\n \n@@ -26,9 +27,8 @@\n log_level = \"DEBUG\"\n \n if isfile(SYSTEM_CONFIG):\n- with open(SYSTEM_CONFIG) as f:\n- config = json.load(f)\n- log_level = config.get(\"log_level\", \"DEBUG\")\n+ config = load_commented_json(SYSTEM_CONFIG)\n+ log_level = config.get(\"log_level\", \"DEBUG\")\n \n FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(format=FORMAT, level=logging.getLevelName(log_level))\n", "issue": "Mycroft doesn't starts if /etc/mycroft/mycroft.conf exists and has comments\nIn log.py there are these statements:\r\n\r\nif isfile(SYSTEM_CONFIG):\r\n with open(SYSTEM_CONFIG) as f:\r\n config = json.load(f)\r\n log_level = config.get(\"log_level\", \"DEBUG\")\r\n\r\nnote no filter of comments has been done using \"load_commented_json\".\r\n\r\nNote: no pull request will be done, because could be the best solution is do not load any config here but use the common configuration procedure.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\nimport json\nimport logging\n\nfrom os.path import isfile\n\nSYSTEM_CONFIG = '/etc/mycroft/mycroft.conf'\n\n__author__ = 'seanfitz'\n\nlog_level = \"DEBUG\"\n\nif isfile(SYSTEM_CONFIG):\n with open(SYSTEM_CONFIG) as f:\n config = json.load(f)\n log_level = config.get(\"log_level\", \"DEBUG\")\n\nFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.getLevelName(log_level))\nlogger = logging.getLogger(\"MYCROFT\")\n\n\ndef getLogger(name=\"MYCROFT\"):\n \"\"\"\n Get a python logger\n\n :param name: Module name for the logger\n\n :return: an instance of logging.Logger\n \"\"\"\n return logging.getLogger(name)\n", "path": "mycroft/util/log.py"}]} | 1,075 | 199 |
gh_patches_debug_9058 | rasdani/github-patches | git_diff | wagtail__wagtail-11992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wagtail 6.1.x update breaks `RadioSelect` initialisation after replacing jQuery with vanilla JS
### Issue Summary
Wagtail 6.1.x update breaks `RadioSelect` initialisation after replacing jQuery with vanilla JS.
The JS adapter [`BoundRadioSelect`](https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/telepath/widgets.js#L150) previously tested and set the checked option by comparing the element value against an array value - an array value is passed to the constructor:
#### Wagtail 6.0 implementation
```
setState(state) {
this.element.find('input[name="' + this.name + '"]').val([state]);
}
```
It now tests against a string, and the equality operator fails, so the radio option is not correctly set as `checked` on initialisation:
#### Wagtail 6.1.x implementation
```
setState(state) {
const inputs = this.element.querySelectorAll(`input[name="${this.name}"]`);
for (let i = 0; i < inputs.length; i += 1) {
inputs[i].checked = inputs[i].value === state;
}
}
```
The breaking change is here:
https://github.com/wagtail/wagtail/compare/v6.0.3...v6.1#diff-70fd977dfb7c5b823eab70485fa842987589fc8656203ffb729a4b7d688c1d04L130
### Steps to Reproduce
1. Use a choice block within a Streamfield such as:
```
link_type = blocks.ChoiceBlock(
choices=[
('page', 'Page'),
('document', 'Document'),
('url', 'URL'),
],
widget=forms.RadioSelect,
required=True,
default='page',
)
```
2. View/edit a page/snippet where this field is used
3. In Wagtail 6.0.x the first choice is `checked` as expected
4. In Wagtail 6.1.x the first choice is not `checked` as expected
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Python version: Python 3.12.3
- Django version: 5.0.6
- Wagtail version: 6.1.x
- Browser version: Chrome 125
### Working on this
Anyone can contribute to this. View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you’re ready to start.
</issue>
<code>
[start of wagtail/widget_adapters.py]
1 """
2 Register Telepath adapters for core Django form widgets, so that they can
3 have corresponding Javascript objects with the ability to render new instances
4 and extract field values.
5 """
6
7 from django import forms
8 from django.core.exceptions import ValidationError
9 from django.utils.functional import cached_property
10
11 from wagtail.admin.staticfiles import versioned_static
12 from wagtail.telepath import Adapter, register
13
14
15 class WidgetAdapter(Adapter):
16 js_constructor = "wagtail.widgets.Widget"
17
18 def js_args(self, widget):
19 return [
20 widget.render("__NAME__", None, attrs={"id": "__ID__"}),
21 widget.id_for_label("__ID__"),
22 ]
23
24 def get_media(self, widget):
25 media = super().get_media(widget)
26 return media + widget.media
27
28 @cached_property
29 def media(self):
30 return forms.Media(
31 js=[
32 versioned_static("wagtailadmin/js/telepath/widgets.js"),
33 ]
34 )
35
36
37 register(WidgetAdapter(), forms.widgets.Input)
38 register(WidgetAdapter(), forms.Textarea)
39 register(WidgetAdapter(), forms.CheckboxSelectMultiple)
40
41
42 class CheckboxInputAdapter(WidgetAdapter):
43 js_constructor = "wagtail.widgets.CheckboxInput"
44
45
46 register(CheckboxInputAdapter(), forms.CheckboxInput)
47
48
49 class RadioSelectAdapter(WidgetAdapter):
50 js_constructor = "wagtail.widgets.RadioSelect"
51
52
53 register(RadioSelectAdapter(), forms.RadioSelect)
54
55
56 class SelectAdapter(WidgetAdapter):
57 js_constructor = "wagtail.widgets.Select"
58
59
60 register(SelectAdapter(), forms.Select)
61
62
63 class ValidationErrorAdapter(Adapter):
64 js_constructor = "wagtail.errors.ValidationError"
65
66 def js_args(self, error):
67 return [
68 error.messages,
69 ]
70
71 @cached_property
72 def media(self):
73 return forms.Media(
74 js=[
75 versioned_static("wagtailadmin/js/telepath/widgets.js"),
76 ]
77 )
78
79
80 register(ValidationErrorAdapter(), ValidationError)
81
[end of wagtail/widget_adapters.py]
</code>
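Given the registrations above, one low-risk adjustment (a sketch of a possible direction, not necessarily the actual fix) is to route widgets whose client-side state is a list of values through the adapter whose JavaScript counterpart iterates over all inputs when applying state:

```python
# Sketch, meant to live in widget_adapters.py above: let the multi-value
# checkbox widget reuse the radio adapter, whose JS setState walks every
# input rather than assuming a single scalar value.
register(RadioSelectAdapter(), forms.CheckboxSelectMultiple)
```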
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/widget_adapters.py b/wagtail/widget_adapters.py
--- a/wagtail/widget_adapters.py
+++ b/wagtail/widget_adapters.py
@@ -36,7 +36,6 @@
register(WidgetAdapter(), forms.widgets.Input)
register(WidgetAdapter(), forms.Textarea)
-register(WidgetAdapter(), forms.CheckboxSelectMultiple)
class CheckboxInputAdapter(WidgetAdapter):
@@ -51,6 +50,7 @@
register(RadioSelectAdapter(), forms.RadioSelect)
+register(RadioSelectAdapter(), forms.CheckboxSelectMultiple)
class SelectAdapter(WidgetAdapter):
| {"golden_diff": "diff --git a/wagtail/widget_adapters.py b/wagtail/widget_adapters.py\n--- a/wagtail/widget_adapters.py\n+++ b/wagtail/widget_adapters.py\n@@ -36,7 +36,6 @@\n \n register(WidgetAdapter(), forms.widgets.Input)\n register(WidgetAdapter(), forms.Textarea)\n-register(WidgetAdapter(), forms.CheckboxSelectMultiple)\n \n \n class CheckboxInputAdapter(WidgetAdapter):\n@@ -51,6 +50,7 @@\n \n \n register(RadioSelectAdapter(), forms.RadioSelect)\n+register(RadioSelectAdapter(), forms.CheckboxSelectMultiple)\n \n \n class SelectAdapter(WidgetAdapter):\n", "issue": "Wagtail 6.1.x update breaks `RadioSelect` initialiastion after replacing jQuery with vanilla JS\n### Issue Summary\r\n\r\nWagtail 6.1.x update breaks `RadioSelect` initialiastion after replacing jQuery with vanilla JS. \r\n\r\nThe JS adapter [`BoundRadioSelect`](https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/telepath/widgets.js#L150) previously tested and set the checked option by comparing the element value against an array value - an array value is passed to the constructor:\r\n\r\n#### Wagtail 6.0 implementation\r\n```\r\nsetState(state) {\r\n this.element.find('input[name=\"' + this.name + '\"]').val([state]);\r\n}\r\n```\r\n\r\nIt now tests against a string, and the equality operator fails, so the radio option is not correctly set as `checked` on initialisation:\r\n\r\n#### Wagtail 6.1.x implementation\r\n```\r\n setState(state) {\r\n const inputs = this.element.querySelectorAll(`input[name=\"${this.name}\"]`);\r\n for (let i = 0; i < inputs.length; i += 1) {\r\n inputs[i].checked = inputs[i].value === state;\r\n }\r\n }\r\n ```\r\n\r\nThe breaking change is here:\r\n\r\nhttps://github.com/wagtail/wagtail/compare/v6.0.3...v6.1#diff-70fd977dfb7c5b823eab70485fa842987589fc8656203ffb729a4b7d688c1d04L130\r\n\r\n### Steps to Reproduce\r\n\r\n1. Use a choice block within a Streamfield such as:\r\n\r\n```\r\nlink_type = blocks.ChoiceBlock(\r\n choices=[\r\n ('page', 'Page'),\r\n ('document', 'Document'),\r\n ('url', 'URL'),\r\n ],\r\n widget=forms.RadioSelect,\r\n required=True,\r\n default='page',\r\n)\r\n```\r\n2. View/edit a page/snippet where this field is used\r\n3. In Wagtail 6.0.x the first choice is `checked` as expected\r\n4. In Wagtail 6.1.x the first choice is not `checked` as expected\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: Python 3.12.3\r\n- Django version: 5.0.6\r\n- Wagtail version: 6.1.x\r\n- Browser version: Chrome 125\r\n\r\n### Working on this\r\n\r\nAnyone can contribute to this. 
View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you\u2019re ready to start.\r\n\n", "before_files": [{"content": "\"\"\"\nRegister Telepath adapters for core Django form widgets, so that they can\nhave corresponding Javascript objects with the ability to render new instances\nand extract field values.\n\"\"\"\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.functional import cached_property\n\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.telepath import Adapter, register\n\n\nclass WidgetAdapter(Adapter):\n js_constructor = \"wagtail.widgets.Widget\"\n\n def js_args(self, widget):\n return [\n widget.render(\"__NAME__\", None, attrs={\"id\": \"__ID__\"}),\n widget.id_for_label(\"__ID__\"),\n ]\n\n def get_media(self, widget):\n media = super().get_media(widget)\n return media + widget.media\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/widgets.js\"),\n ]\n )\n\n\nregister(WidgetAdapter(), forms.widgets.Input)\nregister(WidgetAdapter(), forms.Textarea)\nregister(WidgetAdapter(), forms.CheckboxSelectMultiple)\n\n\nclass CheckboxInputAdapter(WidgetAdapter):\n js_constructor = \"wagtail.widgets.CheckboxInput\"\n\n\nregister(CheckboxInputAdapter(), forms.CheckboxInput)\n\n\nclass RadioSelectAdapter(WidgetAdapter):\n js_constructor = \"wagtail.widgets.RadioSelect\"\n\n\nregister(RadioSelectAdapter(), forms.RadioSelect)\n\n\nclass SelectAdapter(WidgetAdapter):\n js_constructor = \"wagtail.widgets.Select\"\n\n\nregister(SelectAdapter(), forms.Select)\n\n\nclass ValidationErrorAdapter(Adapter):\n js_constructor = \"wagtail.errors.ValidationError\"\n\n def js_args(self, error):\n return [\n error.messages,\n ]\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/widgets.js\"),\n ]\n )\n\n\nregister(ValidationErrorAdapter(), ValidationError)\n", "path": "wagtail/widget_adapters.py"}]} | 1,712 | 137 |
gh_patches_debug_14443 | rasdani/github-patches | git_diff | aws__aws-cli-4231 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check if AWS::Include location is a string before packaging. Fixes #4087
Fixes #4087
If the value of Location in AWS::Include is *not* a string, we should skip
packaging it. This can happen if customers use, say, an intrinsic function
to construct the Include location:
Example:
```
AWS::Include:
Location:
Fn::Sub: "${S3Bucket}/file.txt"
```
*Issue #, if available:*
*Description of changes:*
By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.
</issue>
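The guard described above reduces to a type check before any upload is attempted. A rough sketch follows; the helper name and call site are invented for illustration (and on Python 2 the check would need `basestring`), while the real logic lives in the CloudFormation `package` customization, which is not shown in this record.

```python
def is_local_include_path(location):
    # Only a plain string can name a local file worth uploading; values
    # like {"Fn::Sub": "${S3Bucket}/file.txt"} must be left untouched.
    return isinstance(location, str)

def export_include(location, uploader):
    if not is_local_include_path(location):
        return location  # skip packaging, keep the intrinsic function as-is
    return uploader.upload(location)  # hypothetical upload call
```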
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.12.165',
27 'colorama>=0.2.5,<=0.3.9',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.2.0,<0.3.0',
31 'PyYAML>=3.10,<=3.13']
32
33
34 if sys.version_info[:2] == (2, 6):
35 # For python2.6 we have to require argparse since it
36 # was not in stdlib until 2.7.
37 requires.append('argparse>=1.1')
38
39
40 setup_options = dict(
41 name='awscli',
42 version=find_version("awscli", "__init__.py"),
43 description='Universal Command Line Environment for AWS.',
44 long_description=read('README.rst'),
45 author='Amazon Web Services',
46 url='http://aws.amazon.com/cli/',
47 scripts=['bin/aws', 'bin/aws.cmd',
48 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
49 'bin/aws_bash_completer'],
50 packages=find_packages(exclude=['tests*']),
51 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
52 'examples/*/*/*.rst', 'topics/*.rst',
53 'topics/*.json']},
54 install_requires=requires,
55 extras_require={
56 ':python_version=="2.6"': [
57 'argparse>=1.1',
58 ]
59 },
60 license="Apache License 2.0",
61 classifiers=[
62 'Development Status :: 5 - Production/Stable',
63 'Intended Audience :: Developers',
64 'Intended Audience :: System Administrators',
65 'Natural Language :: English',
66 'License :: OSI Approved :: Apache Software License',
67 'Programming Language :: Python',
68 'Programming Language :: Python :: 2',
69 'Programming Language :: Python :: 2.6',
70 'Programming Language :: Python :: 2.7',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3.3',
73 'Programming Language :: Python :: 3.4',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 'Programming Language :: Python :: 3.7',
77 ],
78 )
79
80 if 'py2exe' in sys.argv:
81 # This will actually give us a py2exe command.
82 import py2exe
83 # And we have some py2exe specific options.
84 setup_options['options'] = {
85 'py2exe': {
86 'optimize': 0,
87 'skip_archive': True,
88 'dll_excludes': ['crypt32.dll'],
89 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
90 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
91 }
92 }
93 setup_options['console'] = ['bin/aws']
94
95
96 setup(**setup_options)
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,8 +27,7 @@
'colorama>=0.2.5,<=0.3.9',
'docutils>=0.10',
'rsa>=3.1.2,<=3.5.0',
- 's3transfer>=0.2.0,<0.3.0',
- 'PyYAML>=3.10,<=3.13']
+ 's3transfer>=0.2.0,<0.3.0']
if sys.version_info[:2] == (2, 6):
@@ -36,6 +35,12 @@
# was not in stdlib until 2.7.
requires.append('argparse>=1.1')
+ # For Python 2.6, we have to require a different verion of PyYAML since the latest
+ # versions dropped support for Python 2.6.
+ requires.append('PyYAML>=3.10,<=3.13')
+else:
+ requires.append('PyYAML>=3.10,<=5.1')
+
setup_options = dict(
name='awscli',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,8 +27,7 @@\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n- 's3transfer>=0.2.0,<0.3.0',\n- 'PyYAML>=3.10,<=3.13']\n+ 's3transfer>=0.2.0,<0.3.0']\n \n \n if sys.version_info[:2] == (2, 6):\n@@ -36,6 +35,12 @@\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n \n+ # For Python 2.6, we have to require a different verion of PyYAML since the latest\n+ # versions dropped support for Python 2.6.\n+ requires.append('PyYAML>=3.10,<=3.13')\n+else:\n+ requires.append('PyYAML>=3.10,<=5.1')\n+\n \n setup_options = dict(\n name='awscli',\n", "issue": "Check if AWS::Include location is a string before packacing. Fixes #4087\nFixes #4087 \r\n\r\nIf the value of Location in AWS::Include is *not* a string, we should skip\r\npackaging it. This can happen if customers use, say, an intrinsic function\r\nto construct the Include location:\r\n\r\nExample:\r\n```\r\nAWS::Include:\r\n Location:\r\n Fn::Sub: \"${S3Bucket}/file.txt\"\r\n```\r\n\r\n*Issue #, if available:*\r\n\r\n*Description of changes:*\r\n\r\n\r\nBy submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.165',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0',\n 'PyYAML>=3.10,<=3.13']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 
],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,645 | 281 |
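For context on the version-conditional dependency pattern used in the awscli patch above, a minimal sketch follows; the package names and version bounds here are illustrative only, not taken from the record.

```python
# Sketch: pinning a dependency differently depending on the running Python,
# the same pattern the patch applies to PyYAML for Python 2.6.
import sys

requires = ["docutils>=0.10"]

if sys.version_info[:2] == (2, 6):
    # Older interpreters need the last release line that still supports them.
    requires.append("PyYAML>=3.10,<=3.13")
else:
    requires.append("PyYAML>=3.10,<=5.1")

print(requires)
```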
gh_patches_debug_14119 | rasdani/github-patches | git_diff | buildbot__buildbot-3918 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transferring Files with LocalWorker seems not working
This ticket is a migrated Trac ticket [3628](http://trac.buildbot.net/ticket/3628)
People contributed to the original ticket: @unknown_contributor, @sa2ajj
Ticket created on: `Oct 20 2016`
Ticket last modified on: `Oct 27 2016`
---
Hi,
I just updated my buildbot to 0.9 and converted my old Slave to [[LocalWorker]].
But now my builds get stuck during the copy step, in "DirectoryUpload" or "!Multiple[[FileUpload]]".
I checked the `twistd.log` and it looks like the first few small files are copied, but then the transfer stalls on a 5 MB file. The partially copied file always ends up with a size of 3227648 bytes.
Thanks for your help
---
</issue>
<code>
[start of master/buildbot/worker/protocols/null.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 from twisted.internet import defer
21 from twisted.python import log
22
23 from buildbot.worker.protocols import base
24
25
26 class Listener(base.Listener):
27 pass
28
29
30 class ProxyMixin():
31
32 def __init__(self, impl):
33 assert isinstance(impl, self.ImplClass)
34 self.impl = impl
35 self._disconnect_listeners = []
36
37 def callRemote(self, message, *args, **kw):
38 method = getattr(self.impl, "remote_%s" % message, None)
39 if method is None:
40 raise AttributeError("No such method: remote_%s" % (message,))
41 try:
42 state = method(*args, **kw)
43 except TypeError:
44 log.msg("%s didn't accept %s and %s" % (method, args, kw))
45 raise
46 return defer.maybeDeferred(lambda: state)
47
48 def notifyOnDisconnect(self, cb):
49 pass
50
51 def dontNotifyOnDisconnect(self, cb):
52 pass
53
54
55 # just add ProxyMixin capability to the RemoteCommandProxy
56 # so that callers of callRemote actually directly call the proper method
57 class RemoteCommandProxy(ProxyMixin):
58 ImplClass = base.RemoteCommandImpl
59
60
61 class FileReaderProxy(ProxyMixin):
62 ImplClass = base.FileReaderImpl
63
64
65 class FileWriterProxy(ProxyMixin):
66 ImplClass = base.FileWriterImpl
67
68
69 class Connection(base.Connection):
70 proxies = {base.FileWriterImpl: FileWriterProxy,
71 base.FileReaderImpl: FileReaderProxy}
72
73 def loseConnection(self):
74 pass
75
76 def remotePrint(self, message):
77 return defer.maybeDeferred(self.worker.bot.remote_print, message)
78
79 def remoteGetWorkerInfo(self):
80 return defer.maybeDeferred(self.worker.bot.remote_getWorkerInfo)
81
82 def remoteSetBuilderList(self, builders):
83 return defer.maybeDeferred(self.worker.bot.remote_setBuilderList, builders)
84
85 def remoteStartCommand(self, remoteCommand, builderName, commandId, commandName, args):
86 remoteCommand = RemoteCommandProxy(remoteCommand)
87 args = self.createArgsProxies(args)
88 workerforbuilder = self.worker.bot.builders[builderName]
89 return defer.maybeDeferred(workerforbuilder.remote_startCommand, remoteCommand,
90 commandId, commandName, args)
91
92 def remoteShutdown(self):
93 return defer.maybeDeferred(self.worker.stopService)
94
95 def remoteStartBuild(self, builderName):
96 return defer.succeed(self.worker.bot.builders[builderName].remote_startBuild())
97
98 def remoteInterruptCommand(self, builderName, commandId, why):
99 workerforbuilder = self.worker.bot.builders[builderName]
100 return defer.maybeDeferred(workerforbuilder.remote_interruptCommand, commandId, why)
101
[end of master/buildbot/worker/protocols/null.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/worker/protocols/null.py b/master/buildbot/worker/protocols/null.py
--- a/master/buildbot/worker/protocols/null.py
+++ b/master/buildbot/worker/protocols/null.py
@@ -20,6 +20,7 @@
from twisted.internet import defer
from twisted.python import log
+from buildbot.util.eventual import fireEventually
from buildbot.worker.protocols import base
@@ -43,7 +44,8 @@
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
- return defer.maybeDeferred(lambda: state)
+ # break callback recursion for large transfers by using fireEventually
+ return fireEventually(state)
def notifyOnDisconnect(self, cb):
pass
| {"golden_diff": "diff --git a/master/buildbot/worker/protocols/null.py b/master/buildbot/worker/protocols/null.py\n--- a/master/buildbot/worker/protocols/null.py\n+++ b/master/buildbot/worker/protocols/null.py\n@@ -20,6 +20,7 @@\n from twisted.internet import defer\n from twisted.python import log\n \n+from buildbot.util.eventual import fireEventually\n from buildbot.worker.protocols import base\n \n \n@@ -43,7 +44,8 @@\n except TypeError:\n log.msg(\"%s didn't accept %s and %s\" % (method, args, kw))\n raise\n- return defer.maybeDeferred(lambda: state)\n+ # break callback recursion for large transfers by using fireEventually\n+ return fireEventually(state)\n \n def notifyOnDisconnect(self, cb):\n pass\n", "issue": "Transferring Files with LocalWorker seems not working\nThis ticket is a migrated Trac ticket [3628](http://trac.buildbot.net/ticket/3628)\n\nPeople contributed to the original ticket: @unknown_contributor, @sa2ajj\nTicket created on: `Oct 20 2016`\nTicket last modified on: `Oct 27 2016`\n\n---\n\nHi,\n\nI just update my buildbot to 0.9. And i convert my old Slave to [[LocalWorker]].\n\nBut now my builds are stuck during the copy process \"DirectoryUpload\" or \"!Multiple[[FileUpload]]\"\n\nI check the `twistd.log` and it looks like the first light files are copied but then the copy is stuck during a transfer of 5MB. The file has always a size of 3227648o.\n\nThanks for your help\n\n\n---\n\n\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot.worker.protocols import base\n\n\nclass Listener(base.Listener):\n pass\n\n\nclass ProxyMixin():\n\n def __init__(self, impl):\n assert isinstance(impl, self.ImplClass)\n self.impl = impl\n self._disconnect_listeners = []\n\n def callRemote(self, message, *args, **kw):\n method = getattr(self.impl, \"remote_%s\" % message, None)\n if method is None:\n raise AttributeError(\"No such method: remote_%s\" % (message,))\n try:\n state = method(*args, **kw)\n except TypeError:\n log.msg(\"%s didn't accept %s and %s\" % (method, args, kw))\n raise\n return defer.maybeDeferred(lambda: state)\n\n def notifyOnDisconnect(self, cb):\n pass\n\n def dontNotifyOnDisconnect(self, cb):\n pass\n\n\n# just add ProxyMixin capability to the RemoteCommandProxy\n# so that callers of callRemote actually directly call the proper method\nclass RemoteCommandProxy(ProxyMixin):\n ImplClass = base.RemoteCommandImpl\n\n\nclass FileReaderProxy(ProxyMixin):\n ImplClass = base.FileReaderImpl\n\n\nclass FileWriterProxy(ProxyMixin):\n ImplClass = base.FileWriterImpl\n\n\nclass Connection(base.Connection):\n proxies = {base.FileWriterImpl: FileWriterProxy,\n base.FileReaderImpl: FileReaderProxy}\n\n def loseConnection(self):\n pass\n\n def remotePrint(self, message):\n return defer.maybeDeferred(self.worker.bot.remote_print, message)\n\n def remoteGetWorkerInfo(self):\n return defer.maybeDeferred(self.worker.bot.remote_getWorkerInfo)\n\n def remoteSetBuilderList(self, builders):\n return defer.maybeDeferred(self.worker.bot.remote_setBuilderList, builders)\n\n def remoteStartCommand(self, remoteCommand, builderName, commandId, commandName, args):\n remoteCommand = RemoteCommandProxy(remoteCommand)\n args = self.createArgsProxies(args)\n workerforbuilder = self.worker.bot.builders[builderName]\n return defer.maybeDeferred(workerforbuilder.remote_startCommand, remoteCommand,\n commandId, commandName, args)\n\n def remoteShutdown(self):\n return defer.maybeDeferred(self.worker.stopService)\n\n def remoteStartBuild(self, builderName):\n return defer.succeed(self.worker.bot.builders[builderName].remote_startBuild())\n\n def remoteInterruptCommand(self, builderName, commandId, why):\n workerforbuilder = self.worker.bot.builders[builderName]\n return defer.maybeDeferred(workerforbuilder.remote_interruptCommand, commandId, why)\n", "path": "master/buildbot/worker/protocols/null.py"}]} | 1,694 | 180 |
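A minimal sketch of the idea behind `fireEventually` in the buildbot patch above, using only plain Twisted APIs; the helper name and return value are placeholders rather than Buildbot's actual implementation.

```python
# Returning an already-fired Deferred lets long callback chains recurse
# synchronously; scheduling the callback on the next reactor turn breaks
# that recursion, which is what the patch relies on for large transfers.
from twisted.internet import defer, reactor


def fire_eventually(value=None):
    d = defer.Deferred()
    reactor.callLater(0, d.callback, value)  # fire from the event loop, not inline
    return d
```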
gh_patches_debug_8612 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-2934 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
import pyqtgraph.canvas fails.
<!-- In the following, please describe your issue in detail! -->
<!-- If some sections do not apply, just remove them. -->
### Short description
Importing pyqtgraph.canvas causes the program to abort and exit with an error code..
### Code to reproduce
<!-- Please provide a minimal working example that reproduces the issue in the code block below.
Ideally, this should be a full example someone else could run without additional setup. -->
```python
print("Mark 0", flush=True)
import time
print("Mark 1", flush=True)
import pyqtgraph.canvas
print("Mark 2", flush=True)
time.sleep(10)
print("Mark 3", flush=True)
```
### Expected behavior
1. The program should print all four markers, 0 to 3, with a 10-second delay between markers 2 and 3.
2. The program should exit with status code 0 (ok)
### Real behavior
1. The program prints only markers 0 and 1.
2. The program exits with status code 127 (error)
### An error occurred?
No error messages were noticed. The program just exits when it tries to import pyqtgraph.canvas.
### Tested environment(s)
* PyQtGraph version: <!-- output of pyqtgraph.__version__ --> 0.13.3
* Qt Python binding: <!-- output of pyqtgraph.Qt.VERSION_INFO --> PyQt6 6.5.2 Qt 6.5.2
* Python version: 3.12.0
* NumPy version: <!-- output of numpy.__version__ --> 1.26.0
* Operating system: Windows 10, Ryzen
* Installation method: <!-- e.g. pip, conda, system packages, ... --> pip
### Additional context
The issue is discussed here in the pyinstaller repository https://github.com/pyinstaller/pyinstaller/issues/7991#issuecomment-1752032919
Swapping the two lines of CanvasManager as described in the link above seems to fix the problem.
</issue>
<code>
[start of pyqtgraph/canvas/CanvasManager.py]
1 from ..Qt import QtCore, QtWidgets
2
3 if not hasattr(QtCore, 'Signal'):
4 QtCore.Signal = QtCore.pyqtSignal
5 import weakref
6
7
8 class CanvasManager(QtCore.QObject):
9 SINGLETON = None
10
11 sigCanvasListChanged = QtCore.Signal()
12
13 def __init__(self):
14 if CanvasManager.SINGLETON is not None:
15 raise Exception("Can only create one canvas manager.")
16 CanvasManager.SINGLETON = self
17 QtCore.QObject.__init__(self)
18 self.canvases = weakref.WeakValueDictionary()
19
20 @classmethod
21 def instance(cls):
22 return CanvasManager.SINGLETON
23
24 def registerCanvas(self, canvas, name):
25 n2 = name
26 i = 0
27 while n2 in self.canvases:
28 n2 = "%s_%03d" % (name, i)
29 i += 1
30 self.canvases[n2] = canvas
31 self.sigCanvasListChanged.emit()
32 return n2
33
34 def unregisterCanvas(self, name):
35 c = self.canvases[name]
36 del self.canvases[name]
37 self.sigCanvasListChanged.emit()
38
39 def listCanvases(self):
40 return list(self.canvases.keys())
41
42 def getCanvas(self, name):
43 return self.canvases[name]
44
45
46 manager = CanvasManager()
47
48
49 class CanvasCombo(QtWidgets.QComboBox):
50 def __init__(self, parent=None):
51 QtWidgets.QComboBox.__init__(self, parent)
52 man = CanvasManager.instance()
53 man.sigCanvasListChanged.connect(self.updateCanvasList)
54 self.hostName = None
55 self.updateCanvasList()
56
57 def updateCanvasList(self):
58 canvases = CanvasManager.instance().listCanvases()
59 canvases.insert(0, "")
60 if self.hostName in canvases:
61 canvases.remove(self.hostName)
62
63 sel = self.currentText()
64 if sel in canvases:
65 self.blockSignals(True) ## change does not affect current selection; block signals during update
66 self.clear()
67 for i in canvases:
68 self.addItem(i)
69 if i == sel:
70 self.setCurrentIndex(self.count())
71
72 self.blockSignals(False)
73
74 def setHostName(self, name):
75 self.hostName = name
76 self.updateCanvasList()
77
[end of pyqtgraph/canvas/CanvasManager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/canvas/CanvasManager.py b/pyqtgraph/canvas/CanvasManager.py
--- a/pyqtgraph/canvas/CanvasManager.py
+++ b/pyqtgraph/canvas/CanvasManager.py
@@ -13,8 +13,9 @@
def __init__(self):
if CanvasManager.SINGLETON is not None:
raise Exception("Can only create one canvas manager.")
- CanvasManager.SINGLETON = self
+ # It is important to save SINGLETON *after* the call to QObject.__init__, see #2838.
QtCore.QObject.__init__(self)
+ CanvasManager.SINGLETON = self
self.canvases = weakref.WeakValueDictionary()
@classmethod
| {"golden_diff": "diff --git a/pyqtgraph/canvas/CanvasManager.py b/pyqtgraph/canvas/CanvasManager.py\n--- a/pyqtgraph/canvas/CanvasManager.py\n+++ b/pyqtgraph/canvas/CanvasManager.py\n@@ -13,8 +13,9 @@\n def __init__(self):\n if CanvasManager.SINGLETON is not None:\n raise Exception(\"Can only create one canvas manager.\")\n- CanvasManager.SINGLETON = self\n+ # It is important to save SINGLETON *after* the call to QObject.__init__, see #2838.\n QtCore.QObject.__init__(self)\n+ CanvasManager.SINGLETON = self\n self.canvases = weakref.WeakValueDictionary()\n \n @classmethod\n", "issue": "import pyqtgraph.canvas fails.\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some sections do not apply, just remove them. -->\r\n\r\n### Short description\r\nImporting pyqtgraph.canvas causes the program to abort and exit with an error code..\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n\r\n```python\r\nprint(\"Mark 0\", flush=True)\r\nimport time\r\nprint(\"Mark 1\", flush=True)\r\nimport pyqtgraph.canvas\r\nprint(\"Mark 2\", flush=True)\r\ntime.sleep(10)\r\nprint(\"Mark 3\", flush=True)\r\n\r\n```\r\n\r\n### Expected behavior\r\n1. The program should print out all the markers four ,0 to 3, with a 10 seconds delay between marker 2 and 3.\r\n2. The program should exit with status code 0 (ok)\r\n\r\n\r\n### Real behavior\r\n1. The program prints only markers 0 and 1.\r\n2. The program exist with status code 127 (error)\r\n\r\n### An error occurred?\r\nNo error messages were noticed. The program just exits when it tries to import pyqtgraph.canvas.\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: <!-- output of pyqtgraph.__version__ --> 0.13.3\r\n * Qt Python binding: <!-- output of pyqtgraph.Qt.VERSION_INFO --> PyQt6 6.5.2 Qt 6.5.2\r\n * Python version: 3.12.0\r\n * NumPy version: <!-- output of numpy.__version__ --> 1.26.0\r\n * Operating system: Windows 10, Ryzen\r\n * Installation method: <!-- e.g. pip, conda, system packages, ... 
--> pip\r\n\r\n### Additional context\r\n\r\nThe issue is discussed here in the pyinstaller repository https://github.com/pyinstaller/pyinstaller/issues/7991#issuecomment-1752032919\r\n\r\nSwapping the two lines of CanvasManager as described the link above seems to fix the problem.\r\n\n", "before_files": [{"content": "from ..Qt import QtCore, QtWidgets\n\nif not hasattr(QtCore, 'Signal'):\n QtCore.Signal = QtCore.pyqtSignal\nimport weakref\n\n\nclass CanvasManager(QtCore.QObject):\n SINGLETON = None\n \n sigCanvasListChanged = QtCore.Signal()\n \n def __init__(self):\n if CanvasManager.SINGLETON is not None:\n raise Exception(\"Can only create one canvas manager.\")\n CanvasManager.SINGLETON = self\n QtCore.QObject.__init__(self)\n self.canvases = weakref.WeakValueDictionary()\n\n @classmethod\n def instance(cls):\n return CanvasManager.SINGLETON\n \n def registerCanvas(self, canvas, name):\n n2 = name\n i = 0\n while n2 in self.canvases:\n n2 = \"%s_%03d\" % (name, i)\n i += 1\n self.canvases[n2] = canvas\n self.sigCanvasListChanged.emit()\n return n2\n \n def unregisterCanvas(self, name):\n c = self.canvases[name]\n del self.canvases[name]\n self.sigCanvasListChanged.emit()\n \n def listCanvases(self):\n return list(self.canvases.keys())\n \n def getCanvas(self, name):\n return self.canvases[name]\n \n \nmanager = CanvasManager()\n\n\nclass CanvasCombo(QtWidgets.QComboBox):\n def __init__(self, parent=None):\n QtWidgets.QComboBox.__init__(self, parent)\n man = CanvasManager.instance()\n man.sigCanvasListChanged.connect(self.updateCanvasList)\n self.hostName = None\n self.updateCanvasList()\n \n def updateCanvasList(self):\n canvases = CanvasManager.instance().listCanvases()\n canvases.insert(0, \"\")\n if self.hostName in canvases:\n canvases.remove(self.hostName)\n \n sel = self.currentText()\n if sel in canvases:\n self.blockSignals(True) ## change does not affect current selection; block signals during update\n self.clear()\n for i in canvases:\n self.addItem(i)\n if i == sel:\n self.setCurrentIndex(self.count())\n \n self.blockSignals(False)\n \n def setHostName(self, name):\n self.hostName = name\n self.updateCanvasList()\n", "path": "pyqtgraph/canvas/CanvasManager.py"}]} | 1,631 | 164 |
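A simplified sketch of the initialization-order rule the pyqtgraph patch above depends on; the class below is a stand-in without the Qt base class, so the names are illustrative.

```python
class Manager:
    SINGLETON = None

    def __init__(self):
        if Manager.SINGLETON is not None:
            raise Exception("Can only create one manager.")
        # Finish every piece of base-class / framework initialization first...
        self._canvases = {}
        # ...and only then publish the instance, so other code can never
        # observe a half-constructed singleton through the class attribute.
        Manager.SINGLETON = self


manager = Manager()
print(Manager.SINGLETON is manager)  # True
```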
gh_patches_debug_7900 | rasdani/github-patches | git_diff | vyperlang__vyper-3338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unable to use vyper-serve 0.3.6
### Version Information
* vyper Version (output of `vyper --version`): 0.3.6
* OS: ubutu
* Python Version 3.8
### What's your issue about?
Unable to use the vyper-serve API endpoint; the call always fails with an error:
`Unable to json serialize IRNode`
</issue>
<code>
[start of vyper/cli/vyper_serve.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import json
5 import sys
6 from http.server import BaseHTTPRequestHandler, HTTPServer
7 from socketserver import ThreadingMixIn
8
9 import vyper
10 from vyper.codegen import ir_node
11 from vyper.evm.opcodes import DEFAULT_EVM_VERSION
12 from vyper.exceptions import VyperException
13
14
15 def _parse_cli_args():
16 return _parse_args(sys.argv[1:])
17
18
19 def _parse_args(argv):
20 parser = argparse.ArgumentParser(description="Serve Vyper compiler as an HTTP Service")
21 parser.add_argument(
22 "--version", action="version", version=f"{vyper.__version__}+commit{vyper.__commit__}"
23 )
24 parser.add_argument(
25 "-b",
26 help="Address to bind JSON server on, default: localhost:8000",
27 default="localhost:8000",
28 dest="bind_address",
29 )
30
31 args = parser.parse_args(argv)
32
33 if ":" in args.bind_address:
34 ir_node.VYPER_COLOR_OUTPUT = False
35 runserver(*args.bind_address.split(":"))
36 else:
37 print('Provide bind address in "{address}:{port}" format')
38
39
40 class VyperRequestHandler(BaseHTTPRequestHandler):
41 def send_404(self):
42 self.send_response(404)
43 self.end_headers()
44 return
45
46 def send_cors_all(self):
47 self.send_header("Access-Control-Allow-Origin", "*")
48 self.send_header("Access-Control-Allow-Headers", "X-Requested-With, Content-type")
49
50 def do_OPTIONS(self):
51 self.send_response(200)
52 self.send_cors_all()
53 self.end_headers()
54
55 def do_GET(self):
56 if self.path == "/":
57 self.send_response(200)
58 self.send_cors_all()
59 self.end_headers()
60 self.wfile.write(f"Vyper Compiler. Version: {vyper.__version__}\n".encode())
61 else:
62 self.send_404()
63
64 return
65
66 def do_POST(self):
67 if self.path == "/compile":
68 content_len = int(self.headers.get("content-length"))
69 post_body = self.rfile.read(content_len)
70 data = json.loads(post_body)
71
72 response, status_code = self._compile(data)
73
74 self.send_response(status_code)
75 self.send_header("Content-type", "application/json")
76 self.send_cors_all()
77 self.end_headers()
78 self.wfile.write(json.dumps(response).encode())
79
80 else:
81 self.send_404()
82
83 return
84
85 def _compile(self, data):
86 code = data.get("code")
87 if not code:
88 return {"status": "failed", "message": 'No "code" key supplied'}, 400
89 if not isinstance(code, str):
90 return {"status": "failed", "message": '"code" must be a non-empty string'}, 400
91
92 try:
93 code = data["code"]
94 out_dict = vyper.compile_codes(
95 {"": code},
96 list(vyper.compiler.OUTPUT_FORMATS.keys()),
97 evm_version=data.get("evm_version", DEFAULT_EVM_VERSION),
98 )[""]
99 out_dict["ir"] = str(out_dict["ir"])
100 except VyperException as e:
101 return (
102 {"status": "failed", "message": str(e), "column": e.col_offset, "line": e.lineno},
103 400,
104 )
105 except SyntaxError as e:
106 return (
107 {"status": "failed", "message": str(e), "column": e.offset, "line": e.lineno},
108 400,
109 )
110
111 out_dict.update({"status": "success"})
112
113 return out_dict, 200
114
115
116 class VyperHTTPServer(ThreadingMixIn, HTTPServer):
117 """Handle requests in a separate thread."""
118
119 pass
120
121
122 def runserver(host="", port=8000):
123 server_address = (host, int(port))
124 httpd = VyperHTTPServer(server_address, VyperRequestHandler)
125 print(f"Listening on http://{host}:{port}")
126 httpd.serve_forever()
127
[end of vyper/cli/vyper_serve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/cli/vyper_serve.py b/vyper/cli/vyper_serve.py
--- a/vyper/cli/vyper_serve.py
+++ b/vyper/cli/vyper_serve.py
@@ -97,6 +97,7 @@
evm_version=data.get("evm_version", DEFAULT_EVM_VERSION),
)[""]
out_dict["ir"] = str(out_dict["ir"])
+ out_dict["ir_runtime"] = str(out_dict["ir_runtime"])
except VyperException as e:
return (
{"status": "failed", "message": str(e), "column": e.col_offset, "line": e.lineno},
| {"golden_diff": "diff --git a/vyper/cli/vyper_serve.py b/vyper/cli/vyper_serve.py\n--- a/vyper/cli/vyper_serve.py\n+++ b/vyper/cli/vyper_serve.py\n@@ -97,6 +97,7 @@\n evm_version=data.get(\"evm_version\", DEFAULT_EVM_VERSION),\n )[\"\"]\n out_dict[\"ir\"] = str(out_dict[\"ir\"])\n+ out_dict[\"ir_runtime\"] = str(out_dict[\"ir_runtime\"])\n except VyperException as e:\n return (\n {\"status\": \"failed\", \"message\": str(e), \"column\": e.col_offset, \"line\": e.lineno},\n", "issue": "unable to use vyper-serve 0.3.6\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.6\r\n* OS: ubutu\r\n* Python Version 3.8\r\n\r\n### What's your issue about?\r\n\r\nunable to use the vyper-serve api endpoint, the call always fail with an error:\r\n`Unable to json serialize IRNode`\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport sys\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom socketserver import ThreadingMixIn\n\nimport vyper\nfrom vyper.codegen import ir_node\nfrom vyper.evm.opcodes import DEFAULT_EVM_VERSION\nfrom vyper.exceptions import VyperException\n\n\ndef _parse_cli_args():\n return _parse_args(sys.argv[1:])\n\n\ndef _parse_args(argv):\n parser = argparse.ArgumentParser(description=\"Serve Vyper compiler as an HTTP Service\")\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"{vyper.__version__}+commit{vyper.__commit__}\"\n )\n parser.add_argument(\n \"-b\",\n help=\"Address to bind JSON server on, default: localhost:8000\",\n default=\"localhost:8000\",\n dest=\"bind_address\",\n )\n\n args = parser.parse_args(argv)\n\n if \":\" in args.bind_address:\n ir_node.VYPER_COLOR_OUTPUT = False\n runserver(*args.bind_address.split(\":\"))\n else:\n print('Provide bind address in \"{address}:{port}\" format')\n\n\nclass VyperRequestHandler(BaseHTTPRequestHandler):\n def send_404(self):\n self.send_response(404)\n self.end_headers()\n return\n\n def send_cors_all(self):\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.send_header(\"Access-Control-Allow-Headers\", \"X-Requested-With, Content-type\")\n\n def do_OPTIONS(self):\n self.send_response(200)\n self.send_cors_all()\n self.end_headers()\n\n def do_GET(self):\n if self.path == \"/\":\n self.send_response(200)\n self.send_cors_all()\n self.end_headers()\n self.wfile.write(f\"Vyper Compiler. 
Version: {vyper.__version__}\\n\".encode())\n else:\n self.send_404()\n\n return\n\n def do_POST(self):\n if self.path == \"/compile\":\n content_len = int(self.headers.get(\"content-length\"))\n post_body = self.rfile.read(content_len)\n data = json.loads(post_body)\n\n response, status_code = self._compile(data)\n\n self.send_response(status_code)\n self.send_header(\"Content-type\", \"application/json\")\n self.send_cors_all()\n self.end_headers()\n self.wfile.write(json.dumps(response).encode())\n\n else:\n self.send_404()\n\n return\n\n def _compile(self, data):\n code = data.get(\"code\")\n if not code:\n return {\"status\": \"failed\", \"message\": 'No \"code\" key supplied'}, 400\n if not isinstance(code, str):\n return {\"status\": \"failed\", \"message\": '\"code\" must be a non-empty string'}, 400\n\n try:\n code = data[\"code\"]\n out_dict = vyper.compile_codes(\n {\"\": code},\n list(vyper.compiler.OUTPUT_FORMATS.keys()),\n evm_version=data.get(\"evm_version\", DEFAULT_EVM_VERSION),\n )[\"\"]\n out_dict[\"ir\"] = str(out_dict[\"ir\"])\n except VyperException as e:\n return (\n {\"status\": \"failed\", \"message\": str(e), \"column\": e.col_offset, \"line\": e.lineno},\n 400,\n )\n except SyntaxError as e:\n return (\n {\"status\": \"failed\", \"message\": str(e), \"column\": e.offset, \"line\": e.lineno},\n 400,\n )\n\n out_dict.update({\"status\": \"success\"})\n\n return out_dict, 200\n\n\nclass VyperHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread.\"\"\"\n\n pass\n\n\ndef runserver(host=\"\", port=8000):\n server_address = (host, int(port))\n httpd = VyperHTTPServer(server_address, VyperRequestHandler)\n print(f\"Listening on http://{host}:{port}\")\n httpd.serve_forever()\n", "path": "vyper/cli/vyper_serve.py"}]} | 1,800 | 144 |
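An illustrative sketch of the failure mode fixed in the vyper patch above: `json.dumps` raises on objects it cannot encode, so compiler-internal values must be stringified before building the response. The stand-in class below is not Vyper's real IR node type.

```python
import json


class IRNodeStandIn:
    def __str__(self):
        return "(ir ...)"


out_dict = {"bytecode": "0x6001", "ir": IRNodeStandIn(), "ir_runtime": IRNodeStandIn()}

# Convert every non-JSON-serializable entry before encoding the response.
for key in ("ir", "ir_runtime"):
    out_dict[key] = str(out_dict[key])

print(json.dumps(out_dict))
```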
gh_patches_debug_530 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-35 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mobile-friendly layout
Sidebar should drop below the board once "mornhammered" becomes too wide for the window.
</issue>
<code>
[start of will_of_the_prophets/views.py]
1 """Views."""
2
3 from django.shortcuts import render
4 from django.views.generic.edit import CreateView
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.urls import reverse
7
8 from will_of_the_prophets import board, forms, models
9
10
11 def public_board(request):
12 """
13 Board for the public.
14
15 Does not take embargoed rolls into account.
16 """
17 special_square_types = models.SpecialSquareType.objects.all()
18 return render(request, 'will_of_the_prophets/public_board.html',
19 {'board': board.Board(),
20 'special_square_types': special_square_types})
21
22
23 class RollView(LoginRequiredMixin, CreateView):
24 """View for rolling the die."""
25
26 form_class = forms.RollForm
27 template_name = 'will_of_the_prophets/roll.html'
28
29 def get_context_data(self, **kwargs):
30 last_roll = models.Roll.objects.order_by('-embargo').first()
31 return super().get_context_data(
32 **kwargs,
33 last_roll=last_roll,
34 board=board.Board(now=last_roll.embargo),
35 special_square_types=models.SpecialSquareType.objects.all())
36
37 def get_success_url(self):
38 return reverse('roll')
39
[end of will_of_the_prophets/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py
--- a/will_of_the_prophets/views.py
+++ b/will_of_the_prophets/views.py
@@ -35,4 +35,4 @@
special_square_types=models.SpecialSquareType.objects.all())
def get_success_url(self):
- return reverse('roll')
+ return reverse('roll') + "#chula"
| {"golden_diff": "diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py\n--- a/will_of_the_prophets/views.py\n+++ b/will_of_the_prophets/views.py\n@@ -35,4 +35,4 @@\n special_square_types=models.SpecialSquareType.objects.all())\n \n def get_success_url(self):\n- return reverse('roll')\n+ return reverse('roll') + \"#chula\"\n", "issue": "Mobile-friendly layout\nSidebar should drop below the board once \"mornhammered\" becomes too wide for the window.\n", "before_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.shortcuts import render\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n special_square_types = models.SpecialSquareType.objects.all()\n return render(request, 'will_of_the_prophets/public_board.html',\n {'board': board.Board(),\n 'special_square_types': special_square_types})\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = 'will_of_the_prophets/roll.html'\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by('-embargo').first()\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll.embargo),\n special_square_types=models.SpecialSquareType.objects.all())\n\n def get_success_url(self):\n return reverse('roll')\n", "path": "will_of_the_prophets/views.py"}]} | 884 | 99 |
gh_patches_debug_23496 | rasdani/github-patches | git_diff | pypa__setuptools-2858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FR] distutils.errors.* is not fully re-exported
### What's the problem this feature will solve?
The [Migration Advice in PEP 0632](https://www.python.org/dev/peps/pep-0632/#migration-advice) says that `setuptools` is a sufficient replacement for `distutils.errors`. However, only `DistutilsError` and `RemovedCommandError` exist there. In `distutils`, there are:
```
In [7]: [e for e in dir(distutils.errors) if e.endswith('Error')]
Out[7]:
['CCompilerError',
'CompileError',
'DistutilsArgError',
'DistutilsByteCompileError',
'DistutilsClassError',
'DistutilsError',
'DistutilsExecError',
'DistutilsFileError',
'DistutilsGetoptError',
'DistutilsInternalError',
'DistutilsModuleError',
'DistutilsOptionError',
'DistutilsPlatformError',
'DistutilsSetupError',
'DistutilsTemplateError',
'LibError',
'LinkError',
'PreprocessError',
'UnknownFileError']
```
While all of the `Distutils`-prefixed ones derive from `DistutilsError`, the ones without the prefix are derived from `CCompilerError`, and so cannot be caught using anything exported by `setuptools`. Concretely, this makes it difficult to check for compiler flags, as [done here](https://github.com/matplotlib/matplotlib/blob/69395749a4e588eade55b90dbf095549258f4b16/setup.py#L62-L72).
### Describe the solution you'd like
Re-export all `distutils.errors.*` in `setuptools.errors`.
### Alternative Solutions
Currently, we use `distutils.errors`, but won't be able to after Python 3.12. One can use a generic `Exception` too, but that's not recommended.
Or as a complete alternative, if `setuptools` had a way to check whether a compiler supported a flag in a builtin way, then we wouldn't need to catch the `CompileError` ourselves.
### Additional context
_No response_
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
<code>
[start of setuptools/errors.py]
1 """setuptools.errors
2
3 Provides exceptions used by setuptools modules.
4 """
5
6 from distutils.errors import DistutilsError
7
8
9 class RemovedCommandError(DistutilsError, RuntimeError):
10 """Error used for commands that have been removed in setuptools.
11
12 Since ``setuptools`` is built on ``distutils``, simply removing a command
13 from ``setuptools`` will make the behavior fall back to ``distutils``; this
14 error is raised if a command exists in ``distutils`` but has been actively
15 removed in ``setuptools``.
16 """
17
[end of setuptools/errors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/errors.py b/setuptools/errors.py
--- a/setuptools/errors.py
+++ b/setuptools/errors.py
@@ -3,6 +3,7 @@
Provides exceptions used by setuptools modules.
"""
+from distutils import errors as _distutils_errors
from distutils.errors import DistutilsError
@@ -14,3 +15,26 @@
error is raised if a command exists in ``distutils`` but has been actively
removed in ``setuptools``.
"""
+
+
+# Re-export errors from distutils to facilitate the migration to PEP632
+
+ByteCompileError = _distutils_errors.DistutilsByteCompileError
+CCompilerError = _distutils_errors.CCompilerError
+ClassError = _distutils_errors.DistutilsClassError
+CompileError = _distutils_errors.CompileError
+ExecError = _distutils_errors.DistutilsExecError
+FileError = _distutils_errors.DistutilsFileError
+InternalError = _distutils_errors.DistutilsInternalError
+LibError = _distutils_errors.LibError
+LinkError = _distutils_errors.LinkError
+ModuleError = _distutils_errors.DistutilsModuleError
+OptionError = _distutils_errors.DistutilsOptionError
+PlatformError = _distutils_errors.DistutilsPlatformError
+PreprocessError = _distutils_errors.PreprocessError
+SetupError = _distutils_errors.DistutilsSetupError
+TemplateError = _distutils_errors.DistutilsTemplateError
+UnknownFileError = _distutils_errors.UnknownFileError
+
+# The root error class in the hierarchy
+BaseError = _distutils_errors.DistutilsError
| {"golden_diff": "diff --git a/setuptools/errors.py b/setuptools/errors.py\n--- a/setuptools/errors.py\n+++ b/setuptools/errors.py\n@@ -3,6 +3,7 @@\n Provides exceptions used by setuptools modules.\n \"\"\"\n \n+from distutils import errors as _distutils_errors\n from distutils.errors import DistutilsError\n \n \n@@ -14,3 +15,26 @@\n error is raised if a command exists in ``distutils`` but has been actively\n removed in ``setuptools``.\n \"\"\"\n+\n+\n+# Re-export errors from distutils to facilitate the migration to PEP632\n+\n+ByteCompileError = _distutils_errors.DistutilsByteCompileError\n+CCompilerError = _distutils_errors.CCompilerError\n+ClassError = _distutils_errors.DistutilsClassError\n+CompileError = _distutils_errors.CompileError\n+ExecError = _distutils_errors.DistutilsExecError\n+FileError = _distutils_errors.DistutilsFileError\n+InternalError = _distutils_errors.DistutilsInternalError\n+LibError = _distutils_errors.LibError\n+LinkError = _distutils_errors.LinkError\n+ModuleError = _distutils_errors.DistutilsModuleError\n+OptionError = _distutils_errors.DistutilsOptionError\n+PlatformError = _distutils_errors.DistutilsPlatformError\n+PreprocessError = _distutils_errors.PreprocessError\n+SetupError = _distutils_errors.DistutilsSetupError\n+TemplateError = _distutils_errors.DistutilsTemplateError\n+UnknownFileError = _distutils_errors.UnknownFileError\n+\n+# The root error class in the hierarchy\n+BaseError = _distutils_errors.DistutilsError\n", "issue": "[FR] distutils.errors.* is not fully re-exported\n### What's the problem this feature will solve?\n\nThe [Migration Advice in PEP 0632](https://www.python.org/dev/peps/pep-0632/#migration-advice) says that `setuptools` is a sufficient replacement for `distutils.errors`. However, only `DistutilsError` and `RemovedCommandError` exist there. In `distutils`, there are:\r\n```\r\nIn [7]: [e for e in dir(distutils.errors) if e.endswith('Error')]\r\nOut[7]: \r\n['CCompilerError',\r\n 'CompileError',\r\n 'DistutilsArgError',\r\n 'DistutilsByteCompileError',\r\n 'DistutilsClassError',\r\n 'DistutilsError',\r\n 'DistutilsExecError',\r\n 'DistutilsFileError',\r\n 'DistutilsGetoptError',\r\n 'DistutilsInternalError',\r\n 'DistutilsModuleError',\r\n 'DistutilsOptionError',\r\n 'DistutilsPlatformError',\r\n 'DistutilsSetupError',\r\n 'DistutilsTemplateError',\r\n 'LibError',\r\n 'LinkError',\r\n 'PreprocessError',\r\n 'UnknownFileError']\r\n```\r\nWhile all of the `Distutils`-prefixed ones derive from `DistutilsError`, the ones without the prefix are derived from `CCompilerError`, and so cannot be caught using anything exported by `setuptools`. Concretely, this makes it difficult to check for compiler flags, as [done here](https://github.com/matplotlib/matplotlib/blob/69395749a4e588eade55b90dbf095549258f4b16/setup.py#L62-L72).\n\n### Describe the solution you'd like\n\nRe-export all `distutils.errors.*` in `setuptools.errors`.\n\n### Alternative Solutions\n\nCurrently, we use `distutils.errors`, but won't be able to after Python 3.12. 
One can use a generic `Exception` too, but that's not recommended.\r\n\r\nOr as a complete alternative, if `setuptools` had a way to check whether a compiler supported a flag in a builtin way, then we wouldn't need to catch the `CompileError` ourselves.\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "\"\"\"setuptools.errors\n\nProvides exceptions used by setuptools modules.\n\"\"\"\n\nfrom distutils.errors import DistutilsError\n\n\nclass RemovedCommandError(DistutilsError, RuntimeError):\n \"\"\"Error used for commands that have been removed in setuptools.\n\n Since ``setuptools`` is built on ``distutils``, simply removing a command\n from ``setuptools`` will make the behavior fall back to ``distutils``; this\n error is raised if a command exists in ``distutils`` but has been actively\n removed in ``setuptools``.\n \"\"\"\n", "path": "setuptools/errors.py"}]} | 1,165 | 368 |
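A hypothetical usage sketch for the names re-exported by the setuptools patch above: probing whether a compiler accepts a flag by catching the setuptools-level `CompileError` instead of importing `distutils.errors` directly. The probing helper itself is illustrative, not a real setuptools API.

```python
from setuptools import errors


def compiler_accepts_flag(compiler, source_path, flag):
    """Return True if `compiler` can compile `source_path` with `flag`."""
    try:
        compiler.compile([source_path], extra_postargs=[flag])
    except errors.CompileError:
        return False
    return True
```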
gh_patches_debug_1141 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-2280 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] cannot save Mail Rule with "mail and attachment as seperate documents" in 1.11.1
Maybe it's just me, but I cannot save Mail Rule with "mail and attachment as seperate documents".
_Originally posted by @Limerick-gh in https://github.com/paperless-ngx/paperless-ngx/discussions/2265#discussioncomment-4557234_
[Bug] Missing consumption scope options in frontend
### Discussed in https://github.com/paperless-ngx/paperless-ngx/discussions/2265
<div type='discussions-op-text'>
<sup>Originally posted by **morremeyer** December 30, 2022</sup>
With #2000, frontend configuration for mail consumption was added.
With #848, at about the same time, email body & .eml file consumption was added.
#848 added the **consumption scope** for email consumption (see https://github.com/p-h-a-i-l/paperless-ngx/blob/0fda35723d62275a5beb783cbf9061d4d4a15703/src/paperless_mail/models.py#L59-L65) to decide between consuming:
* only the attachments
* the full email as .eml
* the full email as .eml **and** the attachments
The **consumption scope** is not yet configurable on the frontend. I'd be really happy if it were configurable in the frontend in a future version.
I'm pretty sure someone already has that planned, but I couldn't find an issue or discussion for it, so I'm opening this one to track this request.</div>
</issue>
<code>
[start of src/paperless_mail/serialisers.py]
1 from documents.serialisers import CorrespondentField
2 from documents.serialisers import DocumentTypeField
3 from documents.serialisers import TagsField
4 from paperless_mail.models import MailAccount
5 from paperless_mail.models import MailRule
6 from rest_framework import serializers
7
8
9 class ObfuscatedPasswordField(serializers.Field):
10 """
11 Sends *** string instead of password in the clear
12 """
13
14 def to_representation(self, value):
15 return "*" * len(value)
16
17 def to_internal_value(self, data):
18 return data
19
20
21 class MailAccountSerializer(serializers.ModelSerializer):
22 password = ObfuscatedPasswordField()
23
24 class Meta:
25 model = MailAccount
26 depth = 1
27 fields = [
28 "id",
29 "name",
30 "imap_server",
31 "imap_port",
32 "imap_security",
33 "username",
34 "password",
35 "character_set",
36 ]
37
38 def update(self, instance, validated_data):
39 if "password" in validated_data:
40 if len(validated_data.get("password").replace("*", "")) == 0:
41 validated_data.pop("password")
42 super().update(instance, validated_data)
43 return instance
44
45 def create(self, validated_data):
46 mail_account = MailAccount.objects.create(**validated_data)
47 return mail_account
48
49
50 class AccountField(serializers.PrimaryKeyRelatedField):
51 def get_queryset(self):
52 return MailAccount.objects.all().order_by("-id")
53
54
55 class MailRuleSerializer(serializers.ModelSerializer):
56 account = AccountField(required=True)
57 action_parameter = serializers.CharField(
58 allow_null=True,
59 required=False,
60 default="",
61 )
62 assign_correspondent = CorrespondentField(allow_null=True, required=False)
63 assign_tags = TagsField(many=True, allow_null=True, required=False)
64 assign_document_type = DocumentTypeField(allow_null=True, required=False)
65 order = serializers.IntegerField(required=False)
66
67 class Meta:
68 model = MailRule
69 depth = 1
70 fields = [
71 "id",
72 "name",
73 "account",
74 "folder",
75 "filter_from",
76 "filter_subject",
77 "filter_body",
78 "filter_attachment_filename",
79 "maximum_age",
80 "action",
81 "action_parameter",
82 "assign_title_from",
83 "assign_tags",
84 "assign_correspondent_from",
85 "assign_correspondent",
86 "assign_document_type",
87 "order",
88 "attachment_type",
89 ]
90
91 def update(self, instance, validated_data):
92 super().update(instance, validated_data)
93 return instance
94
95 def create(self, validated_data):
96 if "assign_tags" in validated_data:
97 assign_tags = validated_data.pop("assign_tags")
98 mail_rule = MailRule.objects.create(**validated_data)
99 if assign_tags:
100 mail_rule.assign_tags.set(assign_tags)
101 return mail_rule
102
103 def validate(self, attrs):
104 if (
105 attrs["action"] == MailRule.MailAction.TAG
106 or attrs["action"] == MailRule.MailAction.MOVE
107 ) and attrs["action_parameter"] is None:
108 raise serializers.ValidationError("An action parameter is required.")
109
110 return attrs
111
[end of src/paperless_mail/serialisers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py
--- a/src/paperless_mail/serialisers.py
+++ b/src/paperless_mail/serialisers.py
@@ -86,6 +86,7 @@
"assign_document_type",
"order",
"attachment_type",
+ "consumption_scope",
]
def update(self, instance, validated_data):
| {"golden_diff": "diff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py\n--- a/src/paperless_mail/serialisers.py\n+++ b/src/paperless_mail/serialisers.py\n@@ -86,6 +86,7 @@\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n+ \"consumption_scope\",\n ]\n \n def update(self, instance, validated_data):\n", "issue": "[Bug] cannot save Mail Rule with \"mail and attachment as seperate documents\" in 1.11.1\n Maybe it's just me, but I cannot save Mail Rule with \"mail and attachment as seperate documents\".\n\n_Originally posted by @Limerick-gh in https://github.com/paperless-ngx/paperless-ngx/discussions/2265#discussioncomment-4557234_\n \n[Bug] Missing consumption scope options in frontend\n### Discussed in https://github.com/paperless-ngx/paperless-ngx/discussions/2265\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **morremeyer** December 30, 2022</sup>\r\nWith #2000, frontend configuration for mail consumption was added.\r\nWith #848, at about the same time, email body & .eml file consumption was added.\r\n\r\n#848 added the **consumption scope** for email consumption (see https://github.com/p-h-a-i-l/paperless-ngx/blob/0fda35723d62275a5beb783cbf9061d4d4a15703/src/paperless_mail/models.py#L59-L65) to decide between consuming:\r\n\r\n* only the attachments\r\n* the full email as .eml\r\n* the full email as .eml **and** the attachments\r\n\r\nThe **consumption scope** is not yet configurable on the frontend. I'd be really happy if it were configurable in the frontend in a future version.\r\n\r\nI'm pretty sure someone already has that planned, but I couldn't find an issue or discussion for it, so I'm opening this one to track this request.</div>\n", "before_files": [{"content": "from documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom rest_framework import serializers\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(serializers.ModelSerializer):\n password = ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n ]\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0:\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n mail_account = MailAccount.objects.create(**validated_data)\n return mail_account\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(serializers.ModelSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = 
serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = MailRule.objects.create(**validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n", "path": "src/paperless_mail/serialisers.py"}]} | 1,803 | 96 |
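An illustrative reduction of the paperless fix above: a DRF `ModelSerializer` only exposes what `Meta.fields` lists, so a model field that already exists in the database stays invisible to the frontend until it is named there. The snippet below mimics that filtering with plain Python and is not wired to a real Django project.

```python
def serialize(rule: dict, fields: list[str]) -> dict:
    # Mimics the effect of Meta.fields: anything not listed is dropped.
    return {name: rule[name] for name in fields if name in rule}


rule = {"id": 1, "name": "Inbox", "attachment_type": 1, "consumption_scope": 2}

print(serialize(rule, ["id", "name", "attachment_type"]))                        # scope lost
print(serialize(rule, ["id", "name", "attachment_type", "consumption_scope"]))   # scope kept
```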
gh_patches_debug_17612 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-6739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Poundland spider returning Pep&Co only stores as both Poundland and Pep&Co
Poundland has a clothing brand called "Pep&Co", and typically their larger stores have a "Pep&Co" area inside, looking a bit like a concession in a department store. In their storefinder data, these stores are indicated by a "Pep Shop" flag. The ATP poundland.py spider looks for this, and when it's found generates an additional "Pep&Co" store item in addition to the "Poundland" one.
So far so good. However, it appears that there are also a few (about 14) "Pep&Co" stores that aren't also regular Poundland stores. For example, these can occur in shopping centres, where Poundland is renting two nearby units and using one as a regular Poundland store (without Pep&Co) and the other as a Pep&Co only store. Currently ATP is returning a "Poundland" entry for the "Pep&Co" only store. Since there's a separate entry in the storefinder for the actual Poundland store, this leads to duplicates.
Here's an example mapped in OSM:
Poundland - https://www.openstreetmap.org/node/11293224534 - CV21 2JT
Pep&Co - https://www.openstreetmap.org/node/11293224520 - CV21 2JS
Another (unmapped example) is two stores with postcode TS8 0TJ.
I think these "Pep&Co" only stores can be detected by the branch name (stored in item["branch"]) starting with "Pep & Co". I guess we should test for this, and then not yield the final item if it's found. (In case there's any inconsistency in their data, it would probably be good to add the same test to the` if "Pep Shop" in ` line as an alternative. This would ensure that we don't completely drop a branch who's name starts with "Pep&Co" but doesn't have the "Pep shop" flag set.
</issue>
<code>
[start of locations/spiders/poundland.py]
1 from locations.categories import Extras, apply_yes_no
2 from locations.items import Feature
3 from locations.storefinders.woosmap import WoosmapSpider
4
5
6 class PoundlandSpider(WoosmapSpider):
7 name = "poundland"
8 item_attributes = {"brand": "Poundland", "brand_wikidata": "Q1434528"}
9 key = "woos-4108db5c-39f8-360b-9b7e-102c38034b94"
10 origin = "https://www.poundland.co.uk"
11
12 def parse_item(self, item: Feature, feature: dict, **kwargs):
13 item["branch"] = item.pop("name")
14
15 if "Pep Shop" in feature["properties"]["tags"]:
16 pep = item.deepcopy()
17
18 pep["ref"] = pep["ref"] + "_pep"
19
20 pep["brand"] = "Pep&Co"
21 pep["brand_wikidata"] = "Q24908166"
22
23 pep["located_in"] = self.item_attributes["brand"]
24 pep["located_in_wikidata"] = self.item_attributes["brand_wikidata"]
25
26 yield pep
27
28 apply_yes_no(Extras.ATM, item, "ATM" in feature["properties"]["tags"])
29 item["extras"]["icestore"] = "yes" if "Ice Store" in feature["properties"]["tags"] else "no"
30
31 yield item
32
[end of locations/spiders/poundland.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/poundland.py b/locations/spiders/poundland.py
--- a/locations/spiders/poundland.py
+++ b/locations/spiders/poundland.py
@@ -12,7 +12,7 @@
def parse_item(self, item: Feature, feature: dict, **kwargs):
item["branch"] = item.pop("name")
- if "Pep Shop" in feature["properties"]["tags"]:
+ if "Pep Shop" in feature["properties"]["tags"] or item["branch"].startswith("Pep & Co "):
pep = item.deepcopy()
pep["ref"] = pep["ref"] + "_pep"
@@ -25,6 +25,9 @@
yield pep
+ if item["branch"].startswith("Pep & Co "):
+ return
+
apply_yes_no(Extras.ATM, item, "ATM" in feature["properties"]["tags"])
item["extras"]["icestore"] = "yes" if "Ice Store" in feature["properties"]["tags"] else "no"
| {"golden_diff": "diff --git a/locations/spiders/poundland.py b/locations/spiders/poundland.py\n--- a/locations/spiders/poundland.py\n+++ b/locations/spiders/poundland.py\n@@ -12,7 +12,7 @@\n def parse_item(self, item: Feature, feature: dict, **kwargs):\n item[\"branch\"] = item.pop(\"name\")\n \n- if \"Pep Shop\" in feature[\"properties\"][\"tags\"]:\n+ if \"Pep Shop\" in feature[\"properties\"][\"tags\"] or item[\"branch\"].startswith(\"Pep & Co \"):\n pep = item.deepcopy()\n \n pep[\"ref\"] = pep[\"ref\"] + \"_pep\"\n@@ -25,6 +25,9 @@\n \n yield pep\n \n+ if item[\"branch\"].startswith(\"Pep & Co \"):\n+ return\n+\n apply_yes_no(Extras.ATM, item, \"ATM\" in feature[\"properties\"][\"tags\"])\n item[\"extras\"][\"icestore\"] = \"yes\" if \"Ice Store\" in feature[\"properties\"][\"tags\"] else \"no\"\n", "issue": "Poundland spider returning Pep&Co only stores as both Poundland and Pep&Co\nPoundland has a clothing brand called \"Pep&Co\", and typically their larger stores have a \"Pep&Co\" area inside, looking a bit like a concession in a department store. In their storefinder data, these stores are indicated by a \"Pep Shop\" flag. The ATP poundland.py spider looks for this, and when it's found generates an additional \"Pep&Co\" store item in addition to the \"Poundland\" one.\r\n\r\nSo far so good. However, it appears that there are also a few (about 14) \"Pep&Co\" stores that aren't also regular Poundland stores. For example, these can occur in shopping centres, where Poundland is renting two nearby units and using one as a regular Poundland store (without Pep&Co) and the other as a Pep&Co only store. Currently ATP is returning a \"Poundland\" entry for the \"Pep&Co\" only store. Since there's a separate entry in the storefinder for the actual Poundland store, this leads to duplicates.\r\n\r\nHere's an example mapped in OSM:\r\nPoundland - https://www.openstreetmap.org/node/11293224534 - CV21 2JT\r\nPep&Co - https://www.openstreetmap.org/node/11293224520 - CV21 2JS\r\n\r\nAnother (unmapped example) is two stores with postcode TS8 0TJ.\r\n\r\nI think these \"Pep&Co\" only stores can be detected by the branch name (stored in item[\"branch\"]) starting with \"Pep & Co\". I guess we should test for this, and then not yield the final item if it's found. (In case there's any inconsistency in their data, it would probably be good to add the same test to the` if \"Pep Shop\" in ` line as an alternative. 
This would ensure that we don't completely drop a branch who's name starts with \"Pep&Co\" but doesn't have the \"Pep shop\" flag set.\n", "before_files": [{"content": "from locations.categories import Extras, apply_yes_no\nfrom locations.items import Feature\nfrom locations.storefinders.woosmap import WoosmapSpider\n\n\nclass PoundlandSpider(WoosmapSpider):\n name = \"poundland\"\n item_attributes = {\"brand\": \"Poundland\", \"brand_wikidata\": \"Q1434528\"}\n key = \"woos-4108db5c-39f8-360b-9b7e-102c38034b94\"\n origin = \"https://www.poundland.co.uk\"\n\n def parse_item(self, item: Feature, feature: dict, **kwargs):\n item[\"branch\"] = item.pop(\"name\")\n\n if \"Pep Shop\" in feature[\"properties\"][\"tags\"]:\n pep = item.deepcopy()\n\n pep[\"ref\"] = pep[\"ref\"] + \"_pep\"\n\n pep[\"brand\"] = \"Pep&Co\"\n pep[\"brand_wikidata\"] = \"Q24908166\"\n\n pep[\"located_in\"] = self.item_attributes[\"brand\"]\n pep[\"located_in_wikidata\"] = self.item_attributes[\"brand_wikidata\"]\n\n yield pep\n\n apply_yes_no(Extras.ATM, item, \"ATM\" in feature[\"properties\"][\"tags\"])\n item[\"extras\"][\"icestore\"] = \"yes\" if \"Ice Store\" in feature[\"properties\"][\"tags\"] else \"no\"\n\n yield item\n", "path": "locations/spiders/poundland.py"}]} | 1,385 | 240 |
gh_patches_debug_8907 | rasdani/github-patches | git_diff | pymedusa__Medusa-1035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FINDSUBTITLES :: [3824246] Failed to refine video Traceback (most recent call last)
### Before submitting your issue:
Enable debug logging in SickRage settings, reproduce the error (be sure to disable after the bug is fixed)
Branch/Commit: develop/3824246
OS: Linux
What you did: nothing
What happened: ~~rato appeared~~ warning
What you expected: no warning
Logs:
```
2016-09-05 17:55:06 WARNING FINDSUBTITLES :: [3824246] Failed to refine video
Traceback (most recent call last):
File "/home/**********/.sickrage/lib/subliminal/core.py", line 543, in refine
refiner_manager[refiner].plugin(video, **kwargs)
File "/home/**********/.sickrage/sickbeard/refiners/tvepisode.py", line 62, in refine
enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)
KeyError: u'screen_size'
```
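The traceback points at line 62 of `sickbeard/refiners/tvepisode.py`, which indexes `guess['screen_size']` even though the dict returned by `Quality.to_guessit` apparently has no such key for this episode's status. A defensive variant would use `dict.get` so a missing key degrades to `None`; a minimal sketch, assuming the result is a plain dict:

```python
guess = {"format": "HDTV"}  # e.g. a quality dict with no 'screen_size' entry

# guess["screen_size"] would raise KeyError, as in the log above;
# .get() returns None instead, so the refiner can simply skip the field.
resolution = guess.get("screen_size")
video_format = guess.get("format")
print(resolution, video_format)  # -> None HDTV
```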
</issue>
<code>
[start of sickbeard/refiners/tvepisode.py]
1 # -*- coding: utf-8 -*-
2 """TVEpisode refiner."""
3 from __future__ import unicode_literals
4
5 import logging
6 import re
7
8 from subliminal.video import Episode
9
10 from ..common import Quality
11
12 logger = logging.getLogger(__name__)
13
14 SHOW_MAPPING = {
15 'series_tvdb_id': 'tvdb_id',
16 'series_imdb_id': 'imdbid',
17 'year': 'startyear'
18 }
19
20 EPISODE_MAPPING = {
21 'tvdb_id': 'tvdb_id',
22 'episode': 'episode',
23 'season': 'season',
24 'size': 'file_size',
25 'title': 'name',
26 }
27
28 series_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$')
29
30
31 def refine(video, tv_episode=None, **kwargs):
32 """Refine a video by using TVEpisode information.
33
34 :param video: the video to refine.
35 :type video: Episode
36 :param tv_episode: the TVEpisode to be used.
37 :type tv_episode: sickbeard.tv.TVEpisode
38 :param kwargs:
39 """
40 if video.series_tvdb_id and video.tvdb_id:
41 logger.debug('No need to refine with TVEpisode')
42 return
43
44 if not tv_episode:
45 logger.debug('No TVEpisode to be used to refine')
46 return
47
48 if not isinstance(video, Episode):
49 logger.debug('Video {name} is not an episode. Skipping refiner...', name=video.name)
50 return
51
52 if tv_episode.show:
53 logger.debug('Refining using TVShow information.')
54 series, year, country = series_re.match(tv_episode.show.name).groups()
55 enrich({'series': series, 'year': int(year) if year else None}, video)
56 enrich(SHOW_MAPPING, video, tv_episode.show)
57
58 logger.debug('Refining using TVEpisode information.')
59 enrich(EPISODE_MAPPING, video, tv_episode)
60 enrich({'release_group': tv_episode.release_group}, video, overwrite=False)
61 guess = Quality.to_guessit(tv_episode.status)
62 enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)
63
64
65 def enrich(attributes, target, source=None, overwrite=True):
66 """Copy attributes from source to target.
67
68 :param attributes: the attributes mapping
69 :type attributes: dict(str -> str)
70 :param target: the target object
71 :param source: the source object. If None, the value in attributes dict will be used as new_value
72 :param overwrite: if source field should be overwritten if not already set
73 :type overwrite: bool
74 """
75 for key, value in attributes.items():
76 old_value = getattr(target, key)
77 if old_value and not overwrite:
78 continue
79
80 new_value = getattr(source, value) if source else value
81
82 if new_value and old_value != new_value:
83 setattr(target, key, new_value)
84 logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)
85
[end of sickbeard/refiners/tvepisode.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sickbeard/refiners/tvepisode.py b/sickbeard/refiners/tvepisode.py
--- a/sickbeard/refiners/tvepisode.py
+++ b/sickbeard/refiners/tvepisode.py
@@ -59,7 +59,7 @@
enrich(EPISODE_MAPPING, video, tv_episode)
enrich({'release_group': tv_episode.release_group}, video, overwrite=False)
guess = Quality.to_guessit(tv_episode.status)
- enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)
+ enrich({'resolution': guess.get('screen_size'), 'format': guess.get('format')}, video, overwrite=False)
def enrich(attributes, target, source=None, overwrite=True):
| {"golden_diff": "diff --git a/sickbeard/refiners/tvepisode.py b/sickbeard/refiners/tvepisode.py\n--- a/sickbeard/refiners/tvepisode.py\n+++ b/sickbeard/refiners/tvepisode.py\n@@ -59,7 +59,7 @@\n enrich(EPISODE_MAPPING, video, tv_episode)\n enrich({'release_group': tv_episode.release_group}, video, overwrite=False)\n guess = Quality.to_guessit(tv_episode.status)\n- enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)\n+ enrich({'resolution': guess.get('screen_size'), 'format': guess.get('format')}, video, overwrite=False)\n \n \n def enrich(attributes, target, source=None, overwrite=True):\n", "issue": "FINDSUBTITLES :: [3824246] Failed to refine video Traceback (most recent call last)\n### Before submitting your issue:\n\nEnable debug logging in SickRage settings, reproduce the error (be sure to disable after the bug is fixed)\n\nBranch/Commit: develop/3824246\nOS: Linux\nWhat you did: nothing\nWhat happened: ~~rato appeared~~ warning\nWhat you expected: no warning\nLogs:\n\n```\n2016-09-05 17:55:06 WARNING FINDSUBTITLES :: [3824246] Failed to refine video\nTraceback (most recent call last):\n File \"/home/**********/.sickrage/lib/subliminal/core.py\", line 543, in refine\n refiner_manager[refiner].plugin(video, **kwargs)\n File \"/home/**********/.sickrage/sickbeard/refiners/tvepisode.py\", line 62, in refine\n enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)\nKeyError: u'screen_size'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"TVEpisode refiner.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom subliminal.video import Episode\n\nfrom ..common import Quality\n\nlogger = logging.getLogger(__name__)\n\nSHOW_MAPPING = {\n 'series_tvdb_id': 'tvdb_id',\n 'series_imdb_id': 'imdbid',\n 'year': 'startyear'\n}\n\nEPISODE_MAPPING = {\n 'tvdb_id': 'tvdb_id',\n 'episode': 'episode',\n 'season': 'season',\n 'size': 'file_size',\n 'title': 'name',\n}\n\nseries_re = re.compile(r'^(?P<series>.*?)(?: \\((?:(?P<year>\\d{4})|(?P<country>[A-Z]{2}))\\))?$')\n\n\ndef refine(video, tv_episode=None, **kwargs):\n \"\"\"Refine a video by using TVEpisode information.\n\n :param video: the video to refine.\n :type video: Episode\n :param tv_episode: the TVEpisode to be used.\n :type tv_episode: sickbeard.tv.TVEpisode\n :param kwargs:\n \"\"\"\n if video.series_tvdb_id and video.tvdb_id:\n logger.debug('No need to refine with TVEpisode')\n return\n\n if not tv_episode:\n logger.debug('No TVEpisode to be used to refine')\n return\n\n if not isinstance(video, Episode):\n logger.debug('Video {name} is not an episode. 
Skipping refiner...', name=video.name)\n return\n\n if tv_episode.show:\n logger.debug('Refining using TVShow information.')\n series, year, country = series_re.match(tv_episode.show.name).groups()\n enrich({'series': series, 'year': int(year) if year else None}, video)\n enrich(SHOW_MAPPING, video, tv_episode.show)\n\n logger.debug('Refining using TVEpisode information.')\n enrich(EPISODE_MAPPING, video, tv_episode)\n enrich({'release_group': tv_episode.release_group}, video, overwrite=False)\n guess = Quality.to_guessit(tv_episode.status)\n enrich({'resolution': guess['screen_size'], 'format': guess['format']}, video, overwrite=False)\n\n\ndef enrich(attributes, target, source=None, overwrite=True):\n \"\"\"Copy attributes from source to target.\n\n :param attributes: the attributes mapping\n :type attributes: dict(str -> str)\n :param target: the target object\n :param source: the source object. If None, the value in attributes dict will be used as new_value\n :param overwrite: if source field should be overwritten if not already set\n :type overwrite: bool\n \"\"\"\n for key, value in attributes.items():\n old_value = getattr(target, key)\n if old_value and not overwrite:\n continue\n\n new_value = getattr(source, value) if source else value\n\n if new_value and old_value != new_value:\n setattr(target, key, new_value)\n logger.debug('Attribute {key} changed from {old} to {new}', key=key, old=old_value, new=new_value)\n", "path": "sickbeard/refiners/tvepisode.py"}]} | 1,656 | 171 |
gh_patches_debug_29166 | rasdani/github-patches | git_diff | svthalia__concrexit-1870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add cancelled registrations to event in api v2
### Is your feature request related to a problem? Please describe.
API v2 doesn't return a cancelled registration when you've cancelled a registration (in the event serializer). So we cannot display stuff like 'Your registration is cancelled (after the deadline)'.
### Describe the solution you'd like
Also return a registration if it's cancelled, and include fields like is_cancelled, is_late_cancellation, etc.
### Motivation
This makes it possible to show nice status texts like in the old app and the website.
### Describe alternatives you've considered
Leaving out the messages for cancelled registrations. In that case, you can only see that there is no register button, but not why.
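A sketch of the extra fields the registration serializer could expose. The field names mirror the wording above, and the model helpers (`date_cancelled`, `is_late_cancellation()`) are assumptions based on how the website handles cancellations; the `date_cancelled=None` filter in `_user_registration` would also have to go so that a cancelled registration is still returned:

```python
from rest_framework import serializers


class RegistrationStatusFields(serializers.Serializer):
    # Illustrative only; not the final API.
    is_cancelled = serializers.SerializerMethodField()
    is_late_cancellation = serializers.SerializerMethodField()

    def get_is_cancelled(self, instance):
        return instance.date_cancelled is not None

    def get_is_late_cancellation(self, instance):
        return instance.is_late_cancellation()
```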
</issue>
<code>
[start of website/events/api/v2/serializers/event_registration.py]
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5 from payments.api.v2.serializers import PaymentSerializer
6
7
8 class EventRegistrationSerializer(serializers.ModelSerializer):
9 """Serializer for event registrations."""
10
11 def __init__(self, *args, **kwargs):
12 # Don't pass the 'fields' arg up to the superclass
13 fields = kwargs.pop("fields", {"pk", "member", "name"})
14
15 # Instantiate the superclass normally
16 super().__init__(*args, **kwargs)
17
18 allowed = set(fields)
19 existing = set(self.fields.keys())
20 for field_name in existing - allowed:
21 self.fields.pop(field_name)
22
23 class Meta:
24 model = EventRegistration
25 fields = (
26 "pk",
27 "present",
28 "queue_position",
29 "date",
30 "payment",
31 "member",
32 "name",
33 )
34
35 payment = PaymentSerializer()
36 member = MemberSerializer(detailed=False, read_only=True)
37
[end of website/events/api/v2/serializers/event_registration.py]
[start of website/events/api/v2/serializers/event.py]
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
4 from announcements.api.v2.serializers import SlideSerializer
5 from documents.api.v2.serializers.document import DocumentSerializer
6 from events import services
7 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
8 from events.models import Event, EventRegistration
9 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
10 from utils.snippets import create_google_maps_url
11
12
13 class EventSerializer(serializers.ModelSerializer):
14 """Serializer for events."""
15
16 class Meta:
17 model = Event
18 fields = (
19 "pk",
20 "title",
21 "description",
22 "start",
23 "end",
24 "category",
25 "registration_start",
26 "registration_end",
27 "cancel_deadline",
28 "optional_registrations",
29 "location",
30 "price",
31 "fine",
32 "num_participants",
33 "max_participants",
34 "no_registration_message",
35 "cancel_too_late_message",
36 "has_fields",
37 "food_event",
38 "maps_url",
39 "user_permissions",
40 "user_registration",
41 "organiser",
42 "slide",
43 "documents",
44 )
45
46 description = CleanedHTMLSerializer()
47 organiser = MemberGroupSerializer()
48 user_registration = serializers.SerializerMethodField("_user_registration")
49 num_participants = serializers.SerializerMethodField("_num_participants")
50 maps_url = serializers.SerializerMethodField("_maps_url")
51 price = serializers.DecimalField(max_digits=5, decimal_places=2)
52 fine = serializers.DecimalField(max_digits=5, decimal_places=2)
53 slide = SlideSerializer()
54 documents = DocumentSerializer(many=True)
55 user_permissions = serializers.SerializerMethodField("_user_permissions")
56
57 def _user_registration(self, instance):
58 try:
59 if self.context["request"].member:
60 reg = instance.eventregistration_set.get(
61 member=self.context["request"].member, date_cancelled=None
62 )
63 return EventRegistrationSerializer(
64 reg,
65 context=self.context,
66 fields=("pk", "present", "queue_position", "date", "payment"),
67 ).data
68 except EventRegistration.DoesNotExist:
69 pass
70 return None
71
72 def _num_participants(self, instance):
73 if (
74 instance.max_participants
75 and instance.participants.count() > instance.max_participants
76 ):
77 return instance.max_participants
78 return instance.participants.count()
79
80 def _user_permissions(self, instance):
81 member = self.context["request"].member
82 return services.event_permissions(member, instance)
83
84 def _maps_url(self, instance):
85 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
86
[end of website/events/api/v2/serializers/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -58,12 +58,20 @@
try:
if self.context["request"].member:
reg = instance.eventregistration_set.get(
- member=self.context["request"].member, date_cancelled=None
+ member=self.context["request"].member
)
return EventRegistrationSerializer(
reg,
context=self.context,
- fields=("pk", "present", "queue_position", "date", "payment"),
+ fields=(
+ "pk",
+ "present",
+ "queue_position",
+ "is_cancelled",
+ "is_late_cancellation",
+ "date",
+ "payment",
+ ),
).data
except EventRegistration.DoesNotExist:
pass
diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py
--- a/website/events/api/v2/serializers/event_registration.py
+++ b/website/events/api/v2/serializers/event_registration.py
@@ -34,3 +34,16 @@
payment = PaymentSerializer()
member = MemberSerializer(detailed=False, read_only=True)
+ is_cancelled = serializers.SerializerMethodField("_is_cancelled")
+ is_late_cancellation = serializers.SerializerMethodField("_is_late_cancellation")
+ queue_position = serializers.SerializerMethodField("_queue_position")
+
+ def _is_late_cancellation(self, instance):
+ return instance.is_late_cancellation()
+
+ def _queue_position(self, instance):
+ pos = instance.queue_position
+ return pos if pos and pos > 0 else None
+
+ def _is_cancelled(self, instance):
+ return instance.date_cancelled is not None
| {"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -58,12 +58,20 @@\n try:\n if self.context[\"request\"].member:\n reg = instance.eventregistration_set.get(\n- member=self.context[\"request\"].member, date_cancelled=None\n+ member=self.context[\"request\"].member\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n- fields=(\"pk\", \"present\", \"queue_position\", \"date\", \"payment\"),\n+ fields=(\n+ \"pk\",\n+ \"present\",\n+ \"queue_position\",\n+ \"is_cancelled\",\n+ \"is_late_cancellation\",\n+ \"date\",\n+ \"payment\",\n+ ),\n ).data\n except EventRegistration.DoesNotExist:\n pass\ndiff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -34,3 +34,16 @@\n \n payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n+ is_cancelled = serializers.SerializerMethodField(\"_is_cancelled\")\n+ is_late_cancellation = serializers.SerializerMethodField(\"_is_late_cancellation\")\n+ queue_position = serializers.SerializerMethodField(\"_queue_position\")\n+\n+ def _is_late_cancellation(self, instance):\n+ return instance.is_late_cancellation()\n+\n+ def _queue_position(self, instance):\n+ pos = instance.queue_position\n+ return pos if pos and pos > 0 else None\n+\n+ def _is_cancelled(self, instance):\n+ return instance.date_cancelled is not None\n", "issue": "Add cancelled registrations to event in api v2\n### Is your feature request related to a problem? Please describe.\r\nAPI v2 doesn't return a cancelled registration when you've cancelled a registration (in the event serializer). So we cannot display stuff like 'Your registration is cancelled (after the deadline)'.\r\n\r\n### Describe the solution you'd like\r\nAlso return a registration if it's cancelled, and include fields like is_cancelled, is_late_cancellation, etc.\r\n\r\n### Motivation\r\nThis makes it possible to show nice status texts like in the old app and the website.\r\n\r\n### Describe alternatives you've considered\r\nLeaving out the messages for cancelled registrations. 
In that case, you can only see that there is no register button, but not why.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom payments.api.v2.serializers import PaymentSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n", "path": "website/events/api/v2/serializers/event_registration.py"}, {"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organiser\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organiser = MemberGroupSerializer()\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = serializers.DecimalField(max_digits=5, decimal_places=2)\n fine = serializers.DecimalField(max_digits=5, decimal_places=2)\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.eventregistration_set.get(\n member=self.context[\"request\"].member, date_cancelled=None\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\"pk\", \"present\", \"queue_position\", \"date\", \"payment\"),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def 
_user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]} | 1,746 | 434 |
gh_patches_debug_58117 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-76 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Next link in pagination broken in production
The links start with ```https://bothub/```; the correct prefix is ```https://bothub.it/```.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 with open('requirements.txt') as fp:
4 install_requires = fp.read()
5 install_requires = list(
6 filter(lambda x: len(x) > 0, install_requires.split('\n')))
7
8 setup(
9 name='bothub',
10 version='1.7.1',
11 description='bothub',
12 packages=find_packages(),
13 install_requires=install_requires,
14 python_requires='>=3.6',
15 )
16
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
setup(
name='bothub',
- version='1.7.1',
+ version='1.7.2',
description='bothub',
packages=find_packages(),
install_requires=install_requires,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n \n setup(\n name='bothub',\n- version='1.7.1',\n+ version='1.7.2',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n", "issue": "Next link in pagination broken in production\nThe links starts with ```https://bothub/```, correct is ```https://bothub.it/```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\ninstall_requires = list(\n filter(lambda x: len(x) > 0, install_requires.split('\\n')))\n\nsetup(\n name='bothub',\n version='1.7.1',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]} | 678 | 78 |
gh_patches_debug_18546 | rasdani/github-patches | git_diff | ethereum__web3.py-803 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve warning on failed address checksum
* Version: 4
### What was wrong?
Error message about checksum failure isn't clear enough, especially when the address is supplied as all-lower-case.
### How can it be fixed?
Special-case the warning, so that an all-lower-case address gives a specific warning, along the lines of: "web3py only accepts checksummed addresses. Please report it as a bug on any project that does not provide you checksummed addresses. In the meantime, you can force an address into checksummed state with `valid_address = w3.toChecksumAddress(questionable_address)` after double-checking that there are no errors in the address. Alternatively, you can use an ENS name in its place."
But, you know, not a paragraph...
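A sketch of how `validate_address` could special-case all-lower-case input, reusing the helpers already imported in `web3/utils/validation.py`; the message wording and the stand-in exception class are illustrative:

```python
from eth_utils import is_checksum_address, is_hex_address


class InvalidAddress(ValueError):
    """Stand-in for web3.exceptions.InvalidAddress."""


def validate_address(value):
    if not isinstance(value, str):
        raise TypeError('Address {} must be provided as a string'.format(value))
    if not is_hex_address(value):
        raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
    if not is_checksum_address(value):
        if value == value.lower():
            raise InvalidAddress(
                "Web3.py only accepts checksummed addresses. Use "
                "Web3.toChecksumAddress(questionable_address) after double-checking "
                "the address, or use an ENS name instead.",
                value,
            )
        raise InvalidAddress("Address has an invalid EIP-55 checksum", value)
```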
</issue>
<code>
[start of web3/utils/validation.py]
1 import itertools
2
3 from eth_utils import (
4 is_0x_prefixed,
5 is_boolean,
6 is_bytes,
7 is_checksum_address,
8 is_dict,
9 is_hex_address,
10 is_integer,
11 is_list_like,
12 is_string,
13 )
14
15 from web3.exceptions import (
16 InvalidAddress,
17 )
18 from web3.utils.abi import (
19 is_address_type,
20 is_array_type,
21 is_bool_type,
22 is_bytes_type,
23 is_int_type,
24 is_recognized_type,
25 is_string_type,
26 is_uint_type,
27 length_of_array_type,
28 sub_type_of_array_type,
29 )
30
31
32 def validate_abi(abi):
33 """
34 Helper function for validating an ABI
35 """
36 if not is_list_like(abi):
37 raise ValueError("'abi' is not a list")
38 for e in abi:
39 if not is_dict(e):
40 raise ValueError("The elements of 'abi' are not all dictionaries")
41
42
43 def validate_abi_type(abi_type):
44 """
45 Helper function for validating an abi_type
46 """
47 if not is_recognized_type(abi_type):
48 raise ValueError("Unrecognized abi_type: {abi_type}".format(abi_type=abi_type))
49
50
51 def validate_abi_value(abi_type, value):
52 """
53 Helper function for validating a value against the expected abi_type
54 Note: abi_type 'bytes' must either be python3 'bytes' object or ''
55 """
56 if is_array_type(abi_type) and is_list_like(value):
57 # validate length
58 specified_length = length_of_array_type(abi_type)
59 if specified_length is not None:
60 if specified_length < 1:
61 raise TypeError(
62 "Invalid abi-type: {abi_type}. Length of fixed sized arrays"
63 "must be greater than 0."
64 .format(abi_type=abi_type)
65 )
66 if specified_length != len(value):
67 raise TypeError(
68 "The following array length does not the length specified"
69 "by the abi-type, {abi_type}: {value}"
70 .format(abi_type=abi_type, value=value)
71 )
72
73 # validate sub_types
74 sub_type = sub_type_of_array_type(abi_type)
75 for v in value:
76 validate_abi_value(sub_type, v)
77 return
78 elif is_bool_type(abi_type) and is_boolean(value):
79 return
80 elif is_uint_type(abi_type) and is_integer(value) and value >= 0:
81 return
82 elif is_int_type(abi_type) and is_integer(value):
83 return
84 elif is_address_type(abi_type):
85 validate_address(value)
86 return
87 elif is_bytes_type(abi_type):
88 if is_bytes(value):
89 return
90 elif is_string(value):
91 if is_0x_prefixed(value):
92 return
93 else:
94 raise TypeError(
95 "ABI values of abi-type 'bytes' must be either"
96 "a python3 'bytes' object or an '0x' prefixed string."
97 )
98 elif is_string_type(abi_type) and is_string(value):
99 return
100
101 raise TypeError(
102 "The following abi value is not a '{abi_type}': {value}"
103 .format(abi_type=abi_type, value=value)
104 )
105
106
107 def validate_address(value):
108 """
109 Helper function for validating an address
110 """
111 if not isinstance(value, str):
112 raise TypeError('Address {} must be provided as a string'.format(value))
113 if not is_hex_address(value):
114 raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
115 if not is_checksum_address(value):
116 raise InvalidAddress("Address has an invalid EIP checksum", value)
117
118
119 def has_one_val(*args, **kwargs):
120 vals = itertools.chain(args, kwargs.values())
121 not_nones = list(filter(lambda val: val is not None, vals))
122 return len(not_nones) == 1
123
124
125 def assert_one_val(*args, **kwargs):
126 if not has_one_val(*args, **kwargs):
127 raise TypeError(
128 "Exactly one of the passed values can be specified. "
129 "Instead, values were: %r, %r" % (args, kwargs)
130 )
131
[end of web3/utils/validation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/utils/validation.py b/web3/utils/validation.py
--- a/web3/utils/validation.py
+++ b/web3/utils/validation.py
@@ -113,7 +113,21 @@
if not is_hex_address(value):
raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
if not is_checksum_address(value):
- raise InvalidAddress("Address has an invalid EIP checksum", value)
+ if value == value.lower():
+ raise InvalidAddress(
+ "Web3.py only accepts checksum addresses. "
+ "The software that gave you this non-checksum address should be considered unsafe, "
+ "please file it as a bug on their platform. "
+ "Try using an ENS name instead. Or, if you must accept lower safety, "
+ "use Web3.toChecksumAddress(lower_case_address).",
+ value,
+ )
+ else:
+ raise InvalidAddress(
+ "Address has an invalid EIP-55 checksum. "
+ "After looking up the address from the original source, try again.",
+ value,
+ )
def has_one_val(*args, **kwargs):
| {"golden_diff": "diff --git a/web3/utils/validation.py b/web3/utils/validation.py\n--- a/web3/utils/validation.py\n+++ b/web3/utils/validation.py\n@@ -113,7 +113,21 @@\n if not is_hex_address(value):\n raise InvalidAddress(\"Address must be 20 bytes, as a hex string with a 0x prefix\", value)\n if not is_checksum_address(value):\n- raise InvalidAddress(\"Address has an invalid EIP checksum\", value)\n+ if value == value.lower():\n+ raise InvalidAddress(\n+ \"Web3.py only accepts checksum addresses. \"\n+ \"The software that gave you this non-checksum address should be considered unsafe, \"\n+ \"please file it as a bug on their platform. \"\n+ \"Try using an ENS name instead. Or, if you must accept lower safety, \"\n+ \"use Web3.toChecksumAddress(lower_case_address).\",\n+ value,\n+ )\n+ else:\n+ raise InvalidAddress(\n+ \"Address has an invalid EIP-55 checksum. \"\n+ \"After looking up the address from the original source, try again.\",\n+ value,\n+ )\n \n \n def has_one_val(*args, **kwargs):\n", "issue": "Improve warning on failed address checksum\n* Version: 4\r\n\r\n### What was wrong?\r\n\r\nError message about checksum failure isn't clear enough, especially when the address is supplied as all-lower-case.\r\n\r\n### How can it be fixed?\r\n\r\nSpecial-case the warning, so that an all-lower-case address gives a specific warning, along the lines of: \"web3py only accepts checksummed addresses. Please report it as a bug on any project that does not provide you checksummed addresses. In the meantime, you can force an address into checksummed state with `valid_address = w3.toChecksumAddress(questionable_address)` after double-checking that there are no errors in the address. Alternatively, you can use an ENS name in its place.\"\r\n\r\nBut, you know, not a paragraph...\n", "before_files": [{"content": "import itertools\n\nfrom eth_utils import (\n is_0x_prefixed,\n is_boolean,\n is_bytes,\n is_checksum_address,\n is_dict,\n is_hex_address,\n is_integer,\n is_list_like,\n is_string,\n)\n\nfrom web3.exceptions import (\n InvalidAddress,\n)\nfrom web3.utils.abi import (\n is_address_type,\n is_array_type,\n is_bool_type,\n is_bytes_type,\n is_int_type,\n is_recognized_type,\n is_string_type,\n is_uint_type,\n length_of_array_type,\n sub_type_of_array_type,\n)\n\n\ndef validate_abi(abi):\n \"\"\"\n Helper function for validating an ABI\n \"\"\"\n if not is_list_like(abi):\n raise ValueError(\"'abi' is not a list\")\n for e in abi:\n if not is_dict(e):\n raise ValueError(\"The elements of 'abi' are not all dictionaries\")\n\n\ndef validate_abi_type(abi_type):\n \"\"\"\n Helper function for validating an abi_type\n \"\"\"\n if not is_recognized_type(abi_type):\n raise ValueError(\"Unrecognized abi_type: {abi_type}\".format(abi_type=abi_type))\n\n\ndef validate_abi_value(abi_type, value):\n \"\"\"\n Helper function for validating a value against the expected abi_type\n Note: abi_type 'bytes' must either be python3 'bytes' object or ''\n \"\"\"\n if is_array_type(abi_type) and is_list_like(value):\n # validate length\n specified_length = length_of_array_type(abi_type)\n if specified_length is not None:\n if specified_length < 1:\n raise TypeError(\n \"Invalid abi-type: {abi_type}. 
Length of fixed sized arrays\"\n \"must be greater than 0.\"\n .format(abi_type=abi_type)\n )\n if specified_length != len(value):\n raise TypeError(\n \"The following array length does not the length specified\"\n \"by the abi-type, {abi_type}: {value}\"\n .format(abi_type=abi_type, value=value)\n )\n\n # validate sub_types\n sub_type = sub_type_of_array_type(abi_type)\n for v in value:\n validate_abi_value(sub_type, v)\n return\n elif is_bool_type(abi_type) and is_boolean(value):\n return\n elif is_uint_type(abi_type) and is_integer(value) and value >= 0:\n return\n elif is_int_type(abi_type) and is_integer(value):\n return\n elif is_address_type(abi_type):\n validate_address(value)\n return\n elif is_bytes_type(abi_type):\n if is_bytes(value):\n return\n elif is_string(value):\n if is_0x_prefixed(value):\n return\n else:\n raise TypeError(\n \"ABI values of abi-type 'bytes' must be either\"\n \"a python3 'bytes' object or an '0x' prefixed string.\"\n )\n elif is_string_type(abi_type) and is_string(value):\n return\n\n raise TypeError(\n \"The following abi value is not a '{abi_type}': {value}\"\n .format(abi_type=abi_type, value=value)\n )\n\n\ndef validate_address(value):\n \"\"\"\n Helper function for validating an address\n \"\"\"\n if not isinstance(value, str):\n raise TypeError('Address {} must be provided as a string'.format(value))\n if not is_hex_address(value):\n raise InvalidAddress(\"Address must be 20 bytes, as a hex string with a 0x prefix\", value)\n if not is_checksum_address(value):\n raise InvalidAddress(\"Address has an invalid EIP checksum\", value)\n\n\ndef has_one_val(*args, **kwargs):\n vals = itertools.chain(args, kwargs.values())\n not_nones = list(filter(lambda val: val is not None, vals))\n return len(not_nones) == 1\n\n\ndef assert_one_val(*args, **kwargs):\n if not has_one_val(*args, **kwargs):\n raise TypeError(\n \"Exactly one of the passed values can be specified. \"\n \"Instead, values were: %r, %r\" % (args, kwargs)\n )\n", "path": "web3/utils/validation.py"}]} | 1,888 | 266 |
gh_patches_debug_38607 | rasdani/github-patches | git_diff | liqd__a4-opin-688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spatial Idea Challenge: No comments possible
In the blueprint Spatial Idea Challenge, comments cannot be made in phase 1 (be sure to test with a normal user account). We need to fix the blueprint, apparently. See https://opin-stage.liqd.net/de/maps/test-the-comments/ for an example.
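Judging from `euth/maps/rules.py` below, the maps app only registers a `propose_idea` permission, so there is presumably no rule granting comments on map ideas at all. A sketch of the kind of rule that would be needed (the permission name and the `phase_allows_comment` predicate are assumptions here):

```python
import rules
from rules.predicates import is_superuser

from adhocracy4.modules.predicates import (is_context_initiator,
                                            is_context_member,
                                            is_context_moderator)
from adhocracy4.phases.predicates import phase_allows_comment

rules.add_perm('euth_maps.comment_mapidea',
               is_superuser | is_context_moderator | is_context_initiator |
               (is_context_member & phase_allows_comment))
```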
</issue>
<code>
[start of euth/maps/views.py]
1 from django.conf import settings
2 from easy_thumbnails.files import get_thumbnailer
3
4 from euth.ideas import views as idea_views
5
6 from . import forms
7 from .models import MapIdea
8
9
10 class MapIdeaListView(idea_views.IdeaListView):
11 model = MapIdea
12
13 def dump_geojson(self):
14 result = {}
15 result['type'] = 'FeatureCollection'
16 feature_list = []
17
18 for item in self.get_queryset():
19
20 url = ''
21
22 if item.image:
23 image = get_thumbnailer(item.image)['map_thumbnail']
24 url = image.url
25
26 properties = {
27 'name': item.name,
28 'slug': item.slug,
29 'image': url,
30 'comments_count': item.comment_count,
31 'positive_rating_count': item.positive_rating_count,
32 'negative_rating_count': item.negative_rating_count,
33 'url': item.get_absolute_url()
34 }
35 point_dict = item.point
36 point_dict['properties'] = properties
37 feature_list.append(point_dict)
38
39 result['features'] = feature_list
40 return result
41
42 def get_context_data(self, **kwargs):
43 context = super().get_context_data(**kwargs)
44 context['mapideas_json'] = self.dump_geojson()
45 context['map_url'] = settings.BASE_MAP
46 context['polygon'] = self.module.settings_instance.polygon
47 return context
48
49
50 class MapIdeaCreateView(idea_views.IdeaCreateView):
51 model = MapIdea
52 form_class = forms.MapIdeaForm
53 permission_required = 'euth_maps.propose_idea'
54
55 def get_form_kwargs(self):
56 kwargs = super().get_form_kwargs()
57 kwargs['settings_instance'] = self.module.settings_instance
58 return kwargs
59
60
61 class MapIdeaUpdateView(idea_views.IdeaUpdateView):
62 model = MapIdea
63 form_class = forms.MapIdeaForm
64
65 def get_form_kwargs(self):
66 kwargs = super().get_form_kwargs()
67 kwargs['settings_instance'] = self.object.module.settings_instance
68 return kwargs
69
70
71 class MapIdeaDeleteView(idea_views.IdeaDeleteView):
72 model = MapIdea
73
74
75 class MapIdeaDetailView(idea_views.IdeaDetailView):
76 model = MapIdea
77 queryset = MapIdea.objects.annotate_positive_rating_count()\
78 .annotate_negative_rating_count()
79
80 def get_context_data(self, **kwargs):
81 context = super().get_context_data(**kwargs)
82 context['map_url'] = settings.BASE_MAP
83 return context
84
[end of euth/maps/views.py]
[start of euth/maps/rules.py]
1 import rules
2 from rules.predicates import is_superuser
3
4 from adhocracy4.modules.predicates import (is_context_initiator,
5 is_context_member,
6 is_context_moderator)
7 from adhocracy4.phases.predicates import phase_allows_create
8
9 from .models import MapIdea
10
11 rules.add_perm('euth_maps.propose_idea',
12 is_superuser | is_context_moderator | is_context_initiator |
13 (is_context_member & phase_allows_create(MapIdea)))
14
[end of euth/maps/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/maps/rules.py b/euth/maps/rules.py
--- a/euth/maps/rules.py
+++ b/euth/maps/rules.py
@@ -3,11 +3,36 @@
from adhocracy4.modules.predicates import (is_context_initiator,
is_context_member,
- is_context_moderator)
-from adhocracy4.phases.predicates import phase_allows_create
-
+ is_context_moderator,
+ is_owner,
+ is_public_context)
+from adhocracy4.phases.predicates import (phase_allows_comment,
+ phase_allows_create,
+ phase_allows_modify,
+ phase_allows_rate)
from .models import MapIdea
-rules.add_perm('euth_maps.propose_idea',
+
+rules.add_perm('euth_maps.rate_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & phase_allows_rate))
+
+
+rules.add_perm('euth_maps.comment_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & phase_allows_comment))
+
+
+rules.add_perm('euth_maps.modify_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ (is_context_member & is_owner & phase_allows_modify))
+
+
+rules.add_perm('euth_maps.propose_mapidea',
is_superuser | is_context_moderator | is_context_initiator |
(is_context_member & phase_allows_create(MapIdea)))
+
+
+rules.add_perm('euth_maps.view_mapidea',
+ is_superuser | is_context_moderator | is_context_initiator |
+ is_context_member | is_public_context)
diff --git a/euth/maps/views.py b/euth/maps/views.py
--- a/euth/maps/views.py
+++ b/euth/maps/views.py
@@ -50,7 +50,7 @@
class MapIdeaCreateView(idea_views.IdeaCreateView):
model = MapIdea
form_class = forms.MapIdeaForm
- permission_required = 'euth_maps.propose_idea'
+ permission_required = 'euth_maps.propose_mapidea'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
@@ -60,6 +60,7 @@
class MapIdeaUpdateView(idea_views.IdeaUpdateView):
model = MapIdea
+ permission_required = 'euth_maps.modify_mapidea'
form_class = forms.MapIdeaForm
def get_form_kwargs(self):
@@ -70,10 +71,12 @@
class MapIdeaDeleteView(idea_views.IdeaDeleteView):
model = MapIdea
+ permission_required = 'euth_maps.modify_mapidea'
class MapIdeaDetailView(idea_views.IdeaDetailView):
model = MapIdea
+ permission_required = 'euth_maps.view_mapidea'
queryset = MapIdea.objects.annotate_positive_rating_count()\
.annotate_negative_rating_count()
| {"golden_diff": "diff --git a/euth/maps/rules.py b/euth/maps/rules.py\n--- a/euth/maps/rules.py\n+++ b/euth/maps/rules.py\n@@ -3,11 +3,36 @@\n \n from adhocracy4.modules.predicates import (is_context_initiator,\n is_context_member,\n- is_context_moderator)\n-from adhocracy4.phases.predicates import phase_allows_create\n-\n+ is_context_moderator,\n+ is_owner,\n+ is_public_context)\n+from adhocracy4.phases.predicates import (phase_allows_comment,\n+ phase_allows_create,\n+ phase_allows_modify,\n+ phase_allows_rate)\n from .models import MapIdea\n \n-rules.add_perm('euth_maps.propose_idea',\n+\n+rules.add_perm('euth_maps.rate_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & phase_allows_rate))\n+\n+\n+rules.add_perm('euth_maps.comment_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & phase_allows_comment))\n+\n+\n+rules.add_perm('euth_maps.modify_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ (is_context_member & is_owner & phase_allows_modify))\n+\n+\n+rules.add_perm('euth_maps.propose_mapidea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_create(MapIdea)))\n+\n+\n+rules.add_perm('euth_maps.view_mapidea',\n+ is_superuser | is_context_moderator | is_context_initiator |\n+ is_context_member | is_public_context)\ndiff --git a/euth/maps/views.py b/euth/maps/views.py\n--- a/euth/maps/views.py\n+++ b/euth/maps/views.py\n@@ -50,7 +50,7 @@\n class MapIdeaCreateView(idea_views.IdeaCreateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n- permission_required = 'euth_maps.propose_idea'\n+ permission_required = 'euth_maps.propose_mapidea'\n \n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n@@ -60,6 +60,7 @@\n \n class MapIdeaUpdateView(idea_views.IdeaUpdateView):\n model = MapIdea\n+ permission_required = 'euth_maps.modify_mapidea'\n form_class = forms.MapIdeaForm\n \n def get_form_kwargs(self):\n@@ -70,10 +71,12 @@\n \n class MapIdeaDeleteView(idea_views.IdeaDeleteView):\n model = MapIdea\n+ permission_required = 'euth_maps.modify_mapidea'\n \n \n class MapIdeaDetailView(idea_views.IdeaDetailView):\n model = MapIdea\n+ permission_required = 'euth_maps.view_mapidea'\n queryset = MapIdea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n", "issue": "Spatial Idea Challenge: No comments possible\nIn the blueprint Spatial Idea Challenge, in phase 1 comments cannot be made (be sure to test with normal user account). We need to fix the blueprint, apparently. See here https://opin-stage.liqd.net/de/maps/test-the-comments/ for an example.\n", "before_files": [{"content": "from django.conf import settings\nfrom easy_thumbnails.files import get_thumbnailer\n\nfrom euth.ideas import views as idea_views\n\nfrom . 
import forms\nfrom .models import MapIdea\n\n\nclass MapIdeaListView(idea_views.IdeaListView):\n model = MapIdea\n\n def dump_geojson(self):\n result = {}\n result['type'] = 'FeatureCollection'\n feature_list = []\n\n for item in self.get_queryset():\n\n url = ''\n\n if item.image:\n image = get_thumbnailer(item.image)['map_thumbnail']\n url = image.url\n\n properties = {\n 'name': item.name,\n 'slug': item.slug,\n 'image': url,\n 'comments_count': item.comment_count,\n 'positive_rating_count': item.positive_rating_count,\n 'negative_rating_count': item.negative_rating_count,\n 'url': item.get_absolute_url()\n }\n point_dict = item.point\n point_dict['properties'] = properties\n feature_list.append(point_dict)\n\n result['features'] = feature_list\n return result\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['mapideas_json'] = self.dump_geojson()\n context['map_url'] = settings.BASE_MAP\n context['polygon'] = self.module.settings_instance.polygon\n return context\n\n\nclass MapIdeaCreateView(idea_views.IdeaCreateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n permission_required = 'euth_maps.propose_idea'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.module.settings_instance\n return kwargs\n\n\nclass MapIdeaUpdateView(idea_views.IdeaUpdateView):\n model = MapIdea\n form_class = forms.MapIdeaForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['settings_instance'] = self.object.module.settings_instance\n return kwargs\n\n\nclass MapIdeaDeleteView(idea_views.IdeaDeleteView):\n model = MapIdea\n\n\nclass MapIdeaDetailView(idea_views.IdeaDetailView):\n model = MapIdea\n queryset = MapIdea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['map_url'] = settings.BASE_MAP\n return context\n", "path": "euth/maps/views.py"}, {"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules.predicates import (is_context_initiator,\n is_context_member,\n is_context_moderator)\nfrom adhocracy4.phases.predicates import phase_allows_create\n\nfrom .models import MapIdea\n\nrules.add_perm('euth_maps.propose_idea',\n is_superuser | is_context_moderator | is_context_initiator |\n (is_context_member & phase_allows_create(MapIdea)))\n", "path": "euth/maps/rules.py"}]} | 1,438 | 666 |
gh_patches_debug_26237 | rasdani/github-patches | git_diff | netket__netket-212 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get rid of netket::Ising and other hard-coded Hamiltonians in C++
In the spirit of #199, we can safely remove the Ising and Heisenberg Hamiltonians from the C++ classes, since they are nothing but special cases of more general operators (basically they are just LocalOperators or even GraphOperators).
Convenient constructors can be defined in a few lines of Python and moved to the Python part of the library.
The BoseHubbard Hamiltonian is a bit trickier; we can keep it as it is for the moment.
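As a rough sketch of what such a Python-level constructor could look like (this assumes the existing `GraphOperator` binding accepts `siteops`/`bondops` lists of local matrices; the import path is illustrative):

```python
import numpy as np

from netket.operator import GraphOperator


def Ising(hilbert, h, J=1.0):
    # Transverse field on every site, ZZ coupling on every bond of the graph.
    sigma_x = np.array([[0, 1], [1, 0]])
    sz_sz = np.diag([1, -1, -1, 1])  # kron(sigma_z, sigma_z) in the 2-spin basis
    return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])
```

Heisenberg would follow the same pattern, with an exchange term added to the bond operator.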
</issue>
<code>
[start of netket/operator.py]
1 from ._C_netket.operator import *
2
[end of netket/operator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netket/operator.py b/netket/operator.py
--- a/netket/operator.py
+++ b/netket/operator.py
@@ -1 +1,52 @@
from ._C_netket.operator import *
+import numpy as _np
+
+
+def Ising(hilbert, h, J=1.0):
+ """
+ Constructs a new ``Ising`` given a hilbert space, a transverse field,
+ and (if specified) a coupling constant.
+
+ Args:
+ hilbert: Hilbert space the operator acts on.
+ h: The strength of the transverse field.
+ J: The strength of the coupling. Default is 1.0.
+
+ Examples:
+ Constructs an ``Ising`` operator for a 1D system.
+
+ ```python
+ >>> import netket as nk
+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
+ >>> hi = nk.hilbert.Spin(s=0.5, graph=g)
+ >>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5)
+ >>> print(op.hilbert.size)
+ 20
+ """
+ sigma_x = _np.array([[0, 1], [1, 0]])
+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+ return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])
+
+
+def Heisenberg(hilbert):
+ """
+ Constructs a new ``Heisenberg`` given a hilbert space.
+ Args:
+ hilbert: Hilbert space the operator acts on.
+ Examples:
+ Constructs a ``Heisenberg`` operator for a 1D system.
+ ```python
+ >>> import netket as nk
+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
+ >>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g)
+ >>> op = nk.operator.Heisenberg(hilbert=hi)
+ >>> print(op.hilbert.size)
+ 20
+ """
+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
+ exchange = _np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])
+ if hilbert.graph.is_bipartite:
+ heis_term = sz_sz - exchange
+ else:
+ heis_term = sz_sz + exchange
+ return GraphOperator(hilbert, bondops=[heis_term])
| {"golden_diff": "diff --git a/netket/operator.py b/netket/operator.py\n--- a/netket/operator.py\n+++ b/netket/operator.py\n@@ -1 +1,52 @@\n from ._C_netket.operator import *\n+import numpy as _np\n+\n+\n+def Ising(hilbert, h, J=1.0):\n+ \"\"\"\n+ Constructs a new ``Ising`` given a hilbert space, a transverse field,\n+ and (if specified) a coupling constant.\n+\n+ Args:\n+ hilbert: Hilbert space the operator acts on.\n+ h: The strength of the transverse field.\n+ J: The strength of the coupling. Default is 1.0.\n+\n+ Examples:\n+ Constructs an ``Ising`` operator for a 1D system.\n+\n+ ```python\n+ >>> import netket as nk\n+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n+ >>> hi = nk.hilbert.Spin(s=0.5, graph=g)\n+ >>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5)\n+ >>> print(op.hilbert.size)\n+ 20\n+ \"\"\"\n+ sigma_x = _np.array([[0, 1], [1, 0]])\n+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n+ return GraphOperator(hilbert, siteops=[-h * sigma_x], bondops=[J * sz_sz])\n+\n+\n+def Heisenberg(hilbert):\n+ \"\"\"\n+ Constructs a new ``Heisenberg`` given a hilbert space.\n+ Args:\n+ hilbert: Hilbert space the operator acts on.\n+ Examples:\n+ Constructs a ``Heisenberg`` operator for a 1D system.\n+ ```python\n+ >>> import netket as nk\n+ >>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n+ >>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, graph=g)\n+ >>> op = nk.operator.Heisenberg(hilbert=hi)\n+ >>> print(op.hilbert.size)\n+ 20\n+ \"\"\"\n+ sz_sz = _np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n+ exchange = _np.array([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])\n+ if hilbert.graph.is_bipartite:\n+ heis_term = sz_sz - exchange\n+ else:\n+ heis_term = sz_sz + exchange\n+ return GraphOperator(hilbert, bondops=[heis_term])\n", "issue": "Get rid of netket::Ising and other hard-coded hamiltonians in c++\nIn the spirit of #199 , we can safely remove the Ising and Heisenberg hamiltonians from the C++ classes, since those are nothing but specific cases of more general hamiltonians (basically they are just LocalOperators or even GraphOperator). \r\nConvenient constructors can be defined in few lines of python, and moved to the python part of the library. \r\n\r\nThe BoseHubbard hamiltonian is a bit trickier, we can keep it as it is for the moment. \n", "before_files": [{"content": "from ._C_netket.operator import *\n", "path": "netket/operator.py"}]} | 666 | 712 |
gh_patches_debug_37010 | rasdani/github-patches | git_diff | mirumee__ariadne-565 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Federated schemas should not require at least one query to be implemented
In a Federated environment, the Gateway instantiates the Query type by default. This means that an implementing service should _not_ be required to implement or extend a query.
# Ideal Scenario
This scenario is valid in the Node and Java implementations: a service should be able to expose no root queries of its own, only the federated query fields, as below.
Produced Query type: (screenshot omitted)
**Example**: This is what the schemas would look like for two federated services:
## Product Service
product/schema.gql
```gql
extend type Query {
products: [Product]
}
type Product {
id: ID!
name: String
reviews: [ProductReview]
}
extend type ProductReview @key(fields: "id") {
id: ID! @external
}
```
**Output**:
```
products: [Product]
_entities(representations: [_Any]): [_Entity]
_service: _Service
```
## Review Service
review/schema.gql
```gql
# Notice how we don't have to extend the Query type
type ProductReview @key(fields: "id") {
id: ID!
comment: String!
}
```
**Output**:
This should be valid.
```
_entities(representations: [_Any]): [_Entity]
_service: _Service
```
# Breaking Scenario
When attempting to implement the `ProductReview` service (see example above) without extending the Query type, Ariadne will fail to [generate a federated schema](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L57). This is because `make_executable_schema` attempts to generate the federated schema by [extending a Query type](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L24), on the assumption that a Query type has already been defined, which in this case it hasn't.
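One way to support a Query-less service would be to inspect the SDL before appending the federation definitions and only emit `extend type Query` when the service actually defines a Query type. A rough sketch using graphql-core's parser (names here are illustrative, not Ariadne's API):

```python
from graphql import parse
from graphql.language.ast import ObjectTypeDefinitionNode


def has_query_type(type_defs: str) -> bool:
    """Return True if the SDL already defines a root Query type."""
    document = parse(type_defs)
    return any(
        isinstance(definition, ObjectTypeDefinitionNode)
        and definition.name.value == "Query"
        for definition in document.definitions
    )


sdl = 'type ProductReview @key(fields: "id") { id: ID! comment: String! }'
type_token = "extend type" if has_query_type(sdl) else "type"  # -> "type" here
```

The federation service and entity definitions could then use that token instead of hard-coding `extend type Query`.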
</issue>
<code>
[start of ariadne/contrib/federation/schema.py]
1 from typing import Dict, List, Type, Union, cast
2
3 from graphql import extend_schema, parse
4 from graphql.language import DocumentNode
5 from graphql.type import (
6 GraphQLObjectType,
7 GraphQLSchema,
8 GraphQLUnionType,
9 )
10
11 from ...executable_schema import make_executable_schema, join_type_defs
12 from ...schema_visitor import SchemaDirectiveVisitor
13 from ...types import SchemaBindable
14 from .utils import get_entity_types, purge_schema_directives, resolve_entities
15
16
17 federation_service_type_defs = """
18 scalar _Any
19
20 type _Service {
21 sdl: String
22 }
23
24 extend type Query {
25 _service: _Service!
26 }
27
28 directive @external on FIELD_DEFINITION
29 directive @requires(fields: String!) on FIELD_DEFINITION
30 directive @provides(fields: String!) on FIELD_DEFINITION
31 directive @key(fields: String!) repeatable on OBJECT | INTERFACE
32 directive @extends on OBJECT | INTERFACE
33 """
34
35 federation_entity_type_defs = """
36 union _Entity
37
38 extend type Query {
39 _entities(representations: [_Any!]!): [_Entity]!
40 }
41 """
42
43
44 def make_federated_schema(
45 type_defs: Union[str, List[str]],
46 *bindables: Union[SchemaBindable, List[SchemaBindable]],
47 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
48 ) -> GraphQLSchema:
49 if isinstance(type_defs, list):
50 type_defs = join_type_defs(type_defs)
51
52 # Remove custom schema directives (to avoid apollo-gateway crashes).
53 # NOTE: This does NOT interfere with ariadne's directives support.
54 sdl = purge_schema_directives(type_defs)
55
56 type_defs = join_type_defs([type_defs, federation_service_type_defs])
57 schema = make_executable_schema(
58 type_defs,
59 *bindables,
60 directives=directives,
61 )
62
63 # Parse through the schema to find all entities with key directive.
64 entity_types = get_entity_types(schema)
65 has_entities = len(entity_types) > 0
66
67 # Add the federation type definitions.
68 if has_entities:
69 schema = extend_federated_schema(
70 schema,
71 parse(federation_entity_type_defs),
72 )
73
74 # Add _entities query.
75 entity_type = schema.get_type("_Entity")
76 if entity_type:
77 entity_type = cast(GraphQLUnionType, entity_type)
78 entity_type.types = entity_types
79
80 query_type = schema.get_type("Query")
81 if query_type:
82 query_type = cast(GraphQLObjectType, query_type)
83 query_type.fields["_entities"].resolve = resolve_entities
84
85 # Add _service query.
86 query_type = schema.get_type("Query")
87 if query_type:
88 query_type = cast(GraphQLObjectType, query_type)
89 query_type.fields["_service"].resolve = lambda _service, info: {"sdl": sdl}
90
91 return schema
92
93
94 def extend_federated_schema(
95 schema: GraphQLSchema,
96 document_ast: DocumentNode,
97 assume_valid: bool = False,
98 assume_valid_sdl: bool = False,
99 ) -> GraphQLSchema:
100 extended_schema = extend_schema(
101 schema,
102 document_ast,
103 assume_valid,
104 assume_valid_sdl,
105 )
106
107 for (k, v) in schema.type_map.items():
108 resolve_reference = getattr(v, "__resolve_reference__", None)
109 if resolve_reference and k in extended_schema.type_map:
110 setattr(
111 extended_schema.type_map[k],
112 "__resolve_reference__",
113 resolve_reference,
114 )
115
116 return extended_schema
117
[end of ariadne/contrib/federation/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/contrib/federation/schema.py b/ariadne/contrib/federation/schema.py
--- a/ariadne/contrib/federation/schema.py
+++ b/ariadne/contrib/federation/schema.py
@@ -2,6 +2,7 @@
from graphql import extend_schema, parse
from graphql.language import DocumentNode
+from graphql.language.ast import ObjectTypeDefinitionNode
from graphql.type import (
GraphQLObjectType,
GraphQLSchema,
@@ -17,13 +18,13 @@
federation_service_type_defs = """
scalar _Any
- type _Service {
+ type _Service {{
sdl: String
- }
+ }}
- extend type Query {
+ {type_token} Query {{
_service: _Service!
- }
+ }}
directive @external on FIELD_DEFINITION
directive @requires(fields: String!) on FIELD_DEFINITION
@@ -41,6 +42,17 @@
"""
+def has_query_type(type_defs: str) -> bool:
+ ast_document = parse(type_defs)
+ for definition in ast_document.definitions:
+ if (
+ isinstance(definition, ObjectTypeDefinitionNode)
+ and definition.name.value == "Query"
+ ):
+ return True
+ return False
+
+
def make_federated_schema(
type_defs: Union[str, List[str]],
*bindables: Union[SchemaBindable, List[SchemaBindable]],
@@ -52,8 +64,10 @@
# Remove custom schema directives (to avoid apollo-gateway crashes).
# NOTE: This does NOT interfere with ariadne's directives support.
sdl = purge_schema_directives(type_defs)
+ type_token = "extend type" if has_query_type(sdl) else "type"
+ federation_service_type = federation_service_type_defs.format(type_token=type_token)
- type_defs = join_type_defs([type_defs, federation_service_type_defs])
+ type_defs = join_type_defs([type_defs, federation_service_type])
schema = make_executable_schema(
type_defs,
*bindables,
@@ -66,10 +80,7 @@
# Add the federation type definitions.
if has_entities:
- schema = extend_federated_schema(
- schema,
- parse(federation_entity_type_defs),
- )
+ schema = extend_federated_schema(schema, parse(federation_entity_type_defs))
# Add _entities query.
entity_type = schema.get_type("_Entity")
| {"golden_diff": "diff --git a/ariadne/contrib/federation/schema.py b/ariadne/contrib/federation/schema.py\n--- a/ariadne/contrib/federation/schema.py\n+++ b/ariadne/contrib/federation/schema.py\n@@ -2,6 +2,7 @@\n \n from graphql import extend_schema, parse\n from graphql.language import DocumentNode\n+from graphql.language.ast import ObjectTypeDefinitionNode\n from graphql.type import (\n GraphQLObjectType,\n GraphQLSchema,\n@@ -17,13 +18,13 @@\n federation_service_type_defs = \"\"\"\n scalar _Any\n \n- type _Service {\n+ type _Service {{\n sdl: String\n- }\n+ }}\n \n- extend type Query {\n+ {type_token} Query {{\n _service: _Service!\n- }\n+ }}\n \n directive @external on FIELD_DEFINITION\n directive @requires(fields: String!) on FIELD_DEFINITION\n@@ -41,6 +42,17 @@\n \"\"\"\n \n \n+def has_query_type(type_defs: str) -> bool:\n+ ast_document = parse(type_defs)\n+ for definition in ast_document.definitions:\n+ if (\n+ isinstance(definition, ObjectTypeDefinitionNode)\n+ and definition.name.value == \"Query\"\n+ ):\n+ return True\n+ return False\n+\n+\n def make_federated_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n@@ -52,8 +64,10 @@\n # Remove custom schema directives (to avoid apollo-gateway crashes).\n # NOTE: This does NOT interfere with ariadne's directives support.\n sdl = purge_schema_directives(type_defs)\n+ type_token = \"extend type\" if has_query_type(sdl) else \"type\"\n+ federation_service_type = federation_service_type_defs.format(type_token=type_token)\n \n- type_defs = join_type_defs([type_defs, federation_service_type_defs])\n+ type_defs = join_type_defs([type_defs, federation_service_type])\n schema = make_executable_schema(\n type_defs,\n *bindables,\n@@ -66,10 +80,7 @@\n \n # Add the federation type definitions.\n if has_entities:\n- schema = extend_federated_schema(\n- schema,\n- parse(federation_entity_type_defs),\n- )\n+ schema = extend_federated_schema(schema, parse(federation_entity_type_defs))\n \n # Add _entities query.\n entity_type = schema.get_type(\"_Entity\")\n", "issue": "Federated schemas should not require at least one query to be implemented\nIn a Federated environment, the Gateway instantiates the Query type by default. This means that an implementing services should _not_ be required to implement or extend a query. \r\n\r\n# Ideal Scenario\r\nThis is an example scenario of what is valid in Node and Java implementations. For example, it should be valid to expose a service that exposes no root queries, but only the federated query fields, like below.\r\n\r\nProduced Query type:\r\n\r\n\r\n**Example**: This is what the schemas would look like for two federated services:\r\n## Product Service\r\nproduct/schema.gql\r\n```gql\r\nextend type Query {\r\n products: [Product]\r\n}\r\n\r\ntype Product {\r\n id: ID!\r\n name: String\r\n reviews: [ProductReview]\r\n}\r\n\r\nextend type ProductReview @key(fields: \"id\") {\r\n id: ID! 
@external\r\n}\r\n```\r\n**Output**:\r\n\r\n```\r\nproducts: [Product]\r\n_entities(representations: [_Any]): [_Entity]\r\n_service: _Service\r\n```\r\n\r\n## Review Service\r\nreview/schema.gql\r\n```gql\r\n# Notice how we don't have to extend the Query type\r\ntype ProductReview @key(fields: \"id\") {\r\n id: ID!\r\n comment: String!\r\n}\r\n```\r\n\r\n**Output**:\r\nThis should be valid.\r\n```\r\n_entities(representations: [_Any]): [_Entity]\r\n_service: _Service\r\n```\r\n\r\n\r\n# Breaking Scenario\r\nWhen attempting to implement the `ProductReview` service (see example above) without extending the Query type, Ariadne will fail to [generate a federated schema](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L57). This is because `make_executable_schema` attempts to generate a federated schema by [extending a Query type](https://github.com/mirumee/ariadne/blob/master/ariadne/contrib/federation/schema.py#L24) with the assumption that a Query type has been defined, which technically it isn't. \r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List, Type, Union, cast\n\nfrom graphql import extend_schema, parse\nfrom graphql.language import DocumentNode\nfrom graphql.type import (\n GraphQLObjectType,\n GraphQLSchema,\n GraphQLUnionType,\n)\n\nfrom ...executable_schema import make_executable_schema, join_type_defs\nfrom ...schema_visitor import SchemaDirectiveVisitor\nfrom ...types import SchemaBindable\nfrom .utils import get_entity_types, purge_schema_directives, resolve_entities\n\n\nfederation_service_type_defs = \"\"\"\n scalar _Any\n\n type _Service {\n sdl: String\n }\n\n extend type Query {\n _service: _Service!\n }\n\n directive @external on FIELD_DEFINITION\n directive @requires(fields: String!) on FIELD_DEFINITION\n directive @provides(fields: String!) on FIELD_DEFINITION\n directive @key(fields: String!) 
repeatable on OBJECT | INTERFACE\n directive @extends on OBJECT | INTERFACE\n\"\"\"\n\nfederation_entity_type_defs = \"\"\"\n union _Entity\n\n extend type Query {\n _entities(representations: [_Any!]!): [_Entity]!\n }\n\"\"\"\n\n\ndef make_federated_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n # Remove custom schema directives (to avoid apollo-gateway crashes).\n # NOTE: This does NOT interfere with ariadne's directives support.\n sdl = purge_schema_directives(type_defs)\n\n type_defs = join_type_defs([type_defs, federation_service_type_defs])\n schema = make_executable_schema(\n type_defs,\n *bindables,\n directives=directives,\n )\n\n # Parse through the schema to find all entities with key directive.\n entity_types = get_entity_types(schema)\n has_entities = len(entity_types) > 0\n\n # Add the federation type definitions.\n if has_entities:\n schema = extend_federated_schema(\n schema,\n parse(federation_entity_type_defs),\n )\n\n # Add _entities query.\n entity_type = schema.get_type(\"_Entity\")\n if entity_type:\n entity_type = cast(GraphQLUnionType, entity_type)\n entity_type.types = entity_types\n\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_entities\"].resolve = resolve_entities\n\n # Add _service query.\n query_type = schema.get_type(\"Query\")\n if query_type:\n query_type = cast(GraphQLObjectType, query_type)\n query_type.fields[\"_service\"].resolve = lambda _service, info: {\"sdl\": sdl}\n\n return schema\n\n\ndef extend_federated_schema(\n schema: GraphQLSchema,\n document_ast: DocumentNode,\n assume_valid: bool = False,\n assume_valid_sdl: bool = False,\n) -> GraphQLSchema:\n extended_schema = extend_schema(\n schema,\n document_ast,\n assume_valid,\n assume_valid_sdl,\n )\n\n for (k, v) in schema.type_map.items():\n resolve_reference = getattr(v, \"__resolve_reference__\", None)\n if resolve_reference and k in extended_schema.type_map:\n setattr(\n extended_schema.type_map[k],\n \"__resolve_reference__\",\n resolve_reference,\n )\n\n return extended_schema\n", "path": "ariadne/contrib/federation/schema.py"}]} | 1,968 | 548 |
gh_patches_debug_34083 | rasdani/github-patches | git_diff | Nitrate__Nitrate-532 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace deprecated inspect.getargspec call in log_call
`inspect.getargspec` has been deprecated. Replace it with an equivalent call such as `inspect.getfullargspec`.
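Only the positional argument names are needed by the decorator, so the swap should be mechanical. A minimal sketch of the equivalence (the sample function below is just an example):

```python
import inspect


def sample(request, name, value=1):
    pass


# Deprecated: inspect.getargspec(sample).args
# Python 3 replacement:
argspec = inspect.getfullargspec(sample)
arg_names = argspec.args[1:]  # skip the HttpRequest argument, as log_call already does
print(arg_names)  # ['name', 'value']
```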
</issue>
<code>
[start of src/tcms/xmlrpc/decorators.py]
1 # -*- coding: utf-8 -*-
2
3 import inspect
4 import logging
5
6 from functools import wraps
7
8 from django.conf import settings
9 from kobo.django.xmlrpc.models import XmlRpcLog
10
11
12 __all__ = ('log_call',)
13
14 logger = logging.getLogger('nitrate.xmlrpc')
15
16 if settings.DEBUG:
17 # To avoid pollute XMLRPC logs with those generated during development
18 def create_log(user, method, args):
19 log_msg = 'user: {}, method: {}, args: {}'.format(
20 user.username if hasattr(user, 'username') else user,
21 method,
22 args)
23 logger.debug(log_msg)
24 else:
25 create_log = XmlRpcLog.objects.create
26
27
28 def log_call(*args, **kwargs):
29 """Log XMLRPC-specific invocations
30
31 This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,
32 so that we don't have to wait upstream to make the changes.
33
34 Usage::
35
36 from tcms.core.decorators import log_call
37 @log_call(namespace='TestNamespace')
38 def func(request):
39 return None
40 """
41 namespace = kwargs.get('namespace', '')
42 if namespace:
43 namespace = namespace + '.'
44
45 def decorator(function):
46 argspec = inspect.getargspec(function)
47 # Each XMLRPC method has an HttpRequest argument as the first one,
48 # it'll be ignored in the log.
49 arg_names = argspec.args[1:]
50
51 @wraps(function)
52 def _new_function(request, *args, **kwargs):
53 try:
54 known_args = zip(arg_names, args)
55 unknown_args = list(enumerate(args[len(arg_names):]))
56 keyword_args = [(key, value) for key, value in
57 kwargs.items()
58 if (key, value) not in known_args]
59
60 create_log(user=request.user,
61 method=f'{namespace}{function.__name__}',
62 args=str(known_args + unknown_args + keyword_args))
63 except Exception:
64 pass
65 return function(request, *args, **kwargs)
66
67 return _new_function
68
69 return decorator
70
[end of src/tcms/xmlrpc/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/tcms/xmlrpc/decorators.py b/src/tcms/xmlrpc/decorators.py
--- a/src/tcms/xmlrpc/decorators.py
+++ b/src/tcms/xmlrpc/decorators.py
@@ -28,7 +28,7 @@
def log_call(*args, **kwargs):
"""Log XMLRPC-specific invocations
- This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,
+ This is copied from kobo.django.xmlrpc.decorators to add custom abilities,
so that we don't have to wait upstream to make the changes.
Usage::
@@ -43,7 +43,7 @@
namespace = namespace + '.'
def decorator(function):
- argspec = inspect.getargspec(function)
+ argspec = inspect.getfullargspec(function)
# Each XMLRPC method has an HttpRequest argument as the first one,
# it'll be ignored in the log.
arg_names = argspec.args[1:]
@@ -51,17 +51,19 @@
@wraps(function)
def _new_function(request, *args, **kwargs):
try:
- known_args = zip(arg_names, args)
+ known_args = list(zip(arg_names, args))
unknown_args = list(enumerate(args[len(arg_names):]))
- keyword_args = [(key, value) for key, value in
- kwargs.items()
- if (key, value) not in known_args]
+ keyword_args = [
+ (key, value) for key, value in kwargs.items()
+ if (key, value) not in known_args
+ ]
create_log(user=request.user,
method=f'{namespace}{function.__name__}',
args=str(known_args + unknown_args + keyword_args))
except Exception:
- pass
+ logger.exception(
+ f'Fail to log XMLRPC call on {function.__name__}')
return function(request, *args, **kwargs)
return _new_function
| {"golden_diff": "diff --git a/src/tcms/xmlrpc/decorators.py b/src/tcms/xmlrpc/decorators.py\n--- a/src/tcms/xmlrpc/decorators.py\n+++ b/src/tcms/xmlrpc/decorators.py\n@@ -28,7 +28,7 @@\n def log_call(*args, **kwargs):\n \"\"\"Log XMLRPC-specific invocations\n \n- This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,\n+ This is copied from kobo.django.xmlrpc.decorators to add custom abilities,\n so that we don't have to wait upstream to make the changes.\n \n Usage::\n@@ -43,7 +43,7 @@\n namespace = namespace + '.'\n \n def decorator(function):\n- argspec = inspect.getargspec(function)\n+ argspec = inspect.getfullargspec(function)\n # Each XMLRPC method has an HttpRequest argument as the first one,\n # it'll be ignored in the log.\n arg_names = argspec.args[1:]\n@@ -51,17 +51,19 @@\n @wraps(function)\n def _new_function(request, *args, **kwargs):\n try:\n- known_args = zip(arg_names, args)\n+ known_args = list(zip(arg_names, args))\n unknown_args = list(enumerate(args[len(arg_names):]))\n- keyword_args = [(key, value) for key, value in\n- kwargs.items()\n- if (key, value) not in known_args]\n+ keyword_args = [\n+ (key, value) for key, value in kwargs.items()\n+ if (key, value) not in known_args\n+ ]\n \n create_log(user=request.user,\n method=f'{namespace}{function.__name__}',\n args=str(known_args + unknown_args + keyword_args))\n except Exception:\n- pass\n+ logger.exception(\n+ f'Fail to log XMLRPC call on {function.__name__}')\n return function(request, *args, **kwargs)\n \n return _new_function\n", "issue": "Replace deprecated inspect.getargspec call in log_call\n`inspect.getargspec` has been deprecated. Replace it with equivalent function call.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport inspect\nimport logging\n\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom kobo.django.xmlrpc.models import XmlRpcLog\n\n\n__all__ = ('log_call',)\n\nlogger = logging.getLogger('nitrate.xmlrpc')\n\nif settings.DEBUG:\n # To avoid pollute XMLRPC logs with those generated during development\n def create_log(user, method, args):\n log_msg = 'user: {}, method: {}, args: {}'.format(\n user.username if hasattr(user, 'username') else user,\n method,\n args)\n logger.debug(log_msg)\nelse:\n create_log = XmlRpcLog.objects.create\n\n\ndef log_call(*args, **kwargs):\n \"\"\"Log XMLRPC-specific invocations\n\n This is copied from kobo.django.xmlrpc.decorators to add custom abitlities,\n so that we don't have to wait upstream to make the changes.\n\n Usage::\n\n from tcms.core.decorators import log_call\n @log_call(namespace='TestNamespace')\n def func(request):\n return None\n \"\"\"\n namespace = kwargs.get('namespace', '')\n if namespace:\n namespace = namespace + '.'\n\n def decorator(function):\n argspec = inspect.getargspec(function)\n # Each XMLRPC method has an HttpRequest argument as the first one,\n # it'll be ignored in the log.\n arg_names = argspec.args[1:]\n\n @wraps(function)\n def _new_function(request, *args, **kwargs):\n try:\n known_args = zip(arg_names, args)\n unknown_args = list(enumerate(args[len(arg_names):]))\n keyword_args = [(key, value) for key, value in\n kwargs.items()\n if (key, value) not in known_args]\n\n create_log(user=request.user,\n method=f'{namespace}{function.__name__}',\n args=str(known_args + unknown_args + keyword_args))\n except Exception:\n pass\n return function(request, *args, **kwargs)\n\n return _new_function\n\n return decorator\n", "path": "src/tcms/xmlrpc/decorators.py"}]} | 1,149 | 443 |
gh_patches_debug_5812 | rasdani/github-patches | git_diff | wagtail__wagtail-713 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in wagtailforms.forms.FormBuilder.formfields
Line 86:
``` python
raise Exception("Unrecognised field type: " + form.field_type)
```
This raises `NameError: global name 'form' is not defined`, since the loop variable is `field`, not `form`.
Ordinarily the flow of things wouldn't hit this line, but I was caught out when badly naming a form field in the JSON fixtures.
</issue>
<code>
[start of wagtail/wagtailforms/forms.py]
1 import django.forms
2 from django.utils.datastructures import SortedDict
3
4
5 class BaseForm(django.forms.Form):
6 def __init__(self, *args, **kwargs):
7 kwargs.setdefault('label_suffix', '')
8 return super(BaseForm, self).__init__(*args, **kwargs)
9
10
11 class FormBuilder(object):
12 def __init__(self, fields):
13 self.fields = fields
14
15 def create_singleline_field(self, field, options):
16 # TODO: This is a default value - it may need to be changed
17 options['max_length'] = 255
18 return django.forms.CharField(**options)
19
20 def create_multiline_field(self, field, options):
21 return django.forms.CharField(widget=django.forms.Textarea, **options)
22
23 def create_date_field(self, field, options):
24 return django.forms.DateField(**options)
25
26 def create_datetime_field(self, field, options):
27 return django.forms.DateTimeField(**options)
28
29 def create_email_field(self, field, options):
30 return django.forms.EmailField(**options)
31
32 def create_url_field(self, field, options):
33 return django.forms.URLField(**options)
34
35 def create_number_field(self, field, options):
36 return django.forms.DecimalField(**options)
37
38 def create_dropdown_field(self, field, options):
39 options['choices'] = map(
40 lambda x: (x.strip(), x.strip()),
41 field.choices.split(',')
42 )
43 return django.forms.ChoiceField(**options)
44
45 def create_radio_field(self, field, options):
46 options['choices'] = map(
47 lambda x: (x.strip(), x.strip()),
48 field.choices.split(',')
49 )
50 return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options)
51
52 def create_checkboxes_field(self, field, options):
53 options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')]
54 options['initial'] = [x.strip() for x in field.default_value.split(',')]
55 return django.forms.MultipleChoiceField(
56 widget=django.forms.CheckboxSelectMultiple, **options
57 )
58
59 def create_checkbox_field(self, field, options):
60 return django.forms.BooleanField(**options)
61
62 FIELD_TYPES = {
63 'singleline': create_singleline_field,
64 'multiline': create_multiline_field,
65 'date': create_date_field,
66 'datetime': create_datetime_field,
67 'email': create_email_field,
68 'url': create_url_field,
69 'number': create_number_field,
70 'dropdown': create_dropdown_field,
71 'radio': create_radio_field,
72 'checkboxes': create_checkboxes_field,
73 'checkbox': create_checkbox_field,
74 }
75
76 @property
77 def formfields(self):
78 formfields = SortedDict()
79
80 for field in self.fields:
81 options = self.get_field_options(field)
82
83 if field.field_type in self.FIELD_TYPES:
84 formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)
85 else:
86 raise Exception("Unrecognised field type: " + form.field_type)
87
88 return formfields
89
90 def get_field_options(self, field):
91 options = {}
92 options['label'] = field.label
93 options['help_text'] = field.help_text
94 options['required'] = field.required
95 options['initial'] = field.default_value
96 return options
97
98 def get_form_class(self):
99 return type('WagtailForm', (BaseForm,), self.formfields)
100
101
102 class SelectDateForm(django.forms.Form):
103 date_from = django.forms.DateTimeField(
104 required=False,
105 widget=django.forms.DateInput(attrs={'placeholder': 'Date from'})
106 )
107 date_to = django.forms.DateTimeField(
108 required=False,
109 widget=django.forms.DateInput(attrs={'placeholder': 'Date to'})
110 )
111
[end of wagtail/wagtailforms/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailforms/forms.py b/wagtail/wagtailforms/forms.py
--- a/wagtail/wagtailforms/forms.py
+++ b/wagtail/wagtailforms/forms.py
@@ -83,7 +83,7 @@
if field.field_type in self.FIELD_TYPES:
formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)
else:
- raise Exception("Unrecognised field type: " + form.field_type)
+ raise Exception("Unrecognised field type: " + field.field_type)
return formfields
| {"golden_diff": "diff --git a/wagtail/wagtailforms/forms.py b/wagtail/wagtailforms/forms.py\n--- a/wagtail/wagtailforms/forms.py\n+++ b/wagtail/wagtailforms/forms.py\n@@ -83,7 +83,7 @@\n if field.field_type in self.FIELD_TYPES:\n formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)\n else:\n- raise Exception(\"Unrecognised field type: \" + form.field_type)\n+ raise Exception(\"Unrecognised field type: \" + field.field_type)\n \n return formfields\n", "issue": "Typo in wagtailforms.forms.FormBuilder.formfields\nLine 86:\n\n``` python\nraise Exception(\"Unrecognised field type: \" + form.field_type)\n```\n\nThis raises `NameError: global name 'form' is not defined`\n\nOrdinarily the flow of things wouldn't run into this line, but I was caught out when badly naming a form field in the json fixtures.\n\n", "before_files": [{"content": "import django.forms\nfrom django.utils.datastructures import SortedDict\n\n\nclass BaseForm(django.forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('label_suffix', '')\n return super(BaseForm, self).__init__(*args, **kwargs)\n\n\nclass FormBuilder(object):\n def __init__(self, fields):\n self.fields = fields\n\n def create_singleline_field(self, field, options):\n # TODO: This is a default value - it may need to be changed\n options['max_length'] = 255\n return django.forms.CharField(**options)\n\n def create_multiline_field(self, field, options):\n return django.forms.CharField(widget=django.forms.Textarea, **options)\n\n def create_date_field(self, field, options):\n return django.forms.DateField(**options)\n\n def create_datetime_field(self, field, options):\n return django.forms.DateTimeField(**options)\n\n def create_email_field(self, field, options):\n return django.forms.EmailField(**options)\n\n def create_url_field(self, field, options):\n return django.forms.URLField(**options)\n\n def create_number_field(self, field, options):\n return django.forms.DecimalField(**options)\n\n def create_dropdown_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(**options)\n\n def create_radio_field(self, field, options):\n options['choices'] = map(\n lambda x: (x.strip(), x.strip()),\n field.choices.split(',')\n )\n return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options)\n\n def create_checkboxes_field(self, field, options):\n options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')]\n options['initial'] = [x.strip() for x in field.default_value.split(',')]\n return django.forms.MultipleChoiceField(\n widget=django.forms.CheckboxSelectMultiple, **options\n )\n\n def create_checkbox_field(self, field, options):\n return django.forms.BooleanField(**options)\n\n FIELD_TYPES = {\n 'singleline': create_singleline_field,\n 'multiline': create_multiline_field,\n 'date': create_date_field,\n 'datetime': create_datetime_field,\n 'email': create_email_field,\n 'url': create_url_field,\n 'number': create_number_field,\n 'dropdown': create_dropdown_field,\n 'radio': create_radio_field,\n 'checkboxes': create_checkboxes_field,\n 'checkbox': create_checkbox_field,\n }\n\n @property\n def formfields(self):\n formfields = SortedDict()\n\n for field in self.fields:\n options = self.get_field_options(field)\n\n if field.field_type in self.FIELD_TYPES:\n formfields[field.clean_name] = self.FIELD_TYPES[field.field_type](self, field, options)\n else:\n raise Exception(\"Unrecognised 
field type: \" + form.field_type)\n\n return formfields\n\n def get_field_options(self, field):\n options = {}\n options['label'] = field.label\n options['help_text'] = field.help_text\n options['required'] = field.required\n options['initial'] = field.default_value\n return options\n\n def get_form_class(self):\n return type('WagtailForm', (BaseForm,), self.formfields)\n\n\nclass SelectDateForm(django.forms.Form):\n date_from = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date from'})\n )\n date_to = django.forms.DateTimeField(\n required=False,\n widget=django.forms.DateInput(attrs={'placeholder': 'Date to'})\n )\n", "path": "wagtail/wagtailforms/forms.py"}]} | 1,654 | 135 |
gh_patches_debug_29536 | rasdani/github-patches | git_diff | fossasia__open-event-server-2067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Internal server error in promotion link
https://open-event-dev.herokuapp.com/events/110/tickets/promotion/
</issue>
<code>
[start of app/views/admin/models_views/ticket_sales.py]
1 import flask_login
2 import pycountry
3 from flask import redirect
4 from flask import request
5 from flask import url_for
6 from flask_admin import BaseView, expose
7
8 from app import get_settings
9 from app.helpers.cache import cache
10 from app.helpers.data_getter import DataGetter
11 from app.helpers.ticketing import TicketingManager
12 from app.models.ticket import Ticket
13
14 class TicketSalesView(BaseView):
15 @cache.memoize(50)
16 def get_ticket(self, ticket_id):
17 return Ticket.query.get(ticket_id)
18
19 @expose('/')
20 @flask_login.login_required
21 def display_ticket_stats(self, event_id):
22 event = DataGetter.get_event(event_id)
23 orders = TicketingManager.get_orders(event_id)
24
25 completed_count = 0
26 completed_amount = 0
27 tickets_count = 0
28
29 orders_summary = {
30 'completed': {
31 'class': 'success',
32 'tickets_count': 0,
33 'orders_count': 0,
34 'total_sales': 0
35 },
36 'pending': {
37 'class': 'warning',
38 'tickets_count': 0,
39 'orders_count': 0,
40 'total_sales': 0
41 },
42 'expired': {
43 'class': 'danger',
44 'tickets_count': 0,
45 'orders_count': 0,
46 'total_sales': 0
47 }
48 }
49
50 tickets_summary = {}
51
52 for ticket in event.tickets:
53 tickets_summary[str(ticket.id)] = {
54 'name': ticket.name,
55 'quantity': ticket.quantity,
56 'completed': {
57 'tickets_count': 0,
58 'sales': 0
59 },
60 'pending': {
61 'tickets_count': 0,
62 'sales': 0
63 },
64 'expired': {
65 'class': 'danger',
66 'tickets_count': 0,
67 'sales': 0
68 }
69 }
70
71 for order in orders:
72 if order.status == 'initialized':
73 order.status = 'pending'
74 orders_summary[str(order.status)]['orders_count'] += 1
75 orders_summary[str(order.status)]['total_sales'] += order.amount
76 for order_ticket in order.tickets:
77 orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity
78 ticket = self.get_ticket(order_ticket.ticket_id)
79 tickets_summary[str(ticket.id)][str(order.status)]['tickets_count'] += order_ticket.quantity
80 tickets_summary[str(ticket.id)][str(order.status)]['sales'] += order_ticket.quantity * ticket.price
81
82 return self.render('/gentelella/admin/event/tickets/tickets.html', event=event, event_id=event_id,
83 orders_summary=orders_summary, tickets_summary=tickets_summary)
84
85 @expose('/orders/')
86 @flask_login.login_required
87 def display_orders(self, event_id):
88 event = DataGetter.get_event(event_id)
89 orders = TicketingManager.get_orders(event_id)
90 return self.render('/gentelella/admin/event/tickets/orders.html', event=event, event_id=event_id, orders=orders)
91
92 @expose('/attendees/')
93 @flask_login.login_required
94 def display_attendees(self, event_id):
95 event = DataGetter.get_event(event_id)
96 orders = TicketingManager.get_orders(event_id)
97 return self.render('/gentelella/admin/event/tickets/attendees.html', event=event,
98 event_id=event_id, orders=orders)
99
100 @expose('/add-order/', methods=('GET', 'POST'))
101 @flask_login.login_required
102 def add_order(self, event_id):
103
104 if request.method == 'POST':
105 order = TicketingManager.create_order(request.form, True)
106 return redirect(url_for('.proceed_order', event_id=event_id, order_identifier=order.identifier))
107
108 event = DataGetter.get_event(event_id)
109 return self.render('/gentelella/admin/event/tickets/add_order.html', event=event, event_id=event_id)
110
111 @expose('/<order_identifier>/', methods=('GET',))
112 def proceed_order(self, event_id, order_identifier):
113 order = TicketingManager.get_order_by_identifier(order_identifier)
114 if order.status == 'completed':
115 return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
116 return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
117 countries=list(pycountry.countries),
118 from_organizer=True,
119 pay_via=order.paid_via,
120 stripe_publishable_key=get_settings()['stripe_publishable_key'])
121
[end of app/views/admin/models_views/ticket_sales.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/views/admin/models_views/ticket_sales.py b/app/views/admin/models_views/ticket_sales.py
--- a/app/views/admin/models_views/ticket_sales.py
+++ b/app/views/admin/models_views/ticket_sales.py
@@ -1,6 +1,6 @@
import flask_login
import pycountry
-from flask import redirect
+from flask import redirect, flash
from flask import request
from flask import url_for
from flask_admin import BaseView, expose
@@ -111,10 +111,17 @@
@expose('/<order_identifier>/', methods=('GET',))
def proceed_order(self, event_id, order_identifier):
order = TicketingManager.get_order_by_identifier(order_identifier)
- if order.status == 'completed':
- return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
- return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
- countries=list(pycountry.countries),
- from_organizer=True,
- pay_via=order.paid_via,
- stripe_publishable_key=get_settings()['stripe_publishable_key'])
+ if order:
+ if self.is_order_completed(order):
+ return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))
+ return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,
+ countries=list(pycountry.countries),
+ from_organizer=True,
+ pay_via=order.paid_via,
+ stripe_publishable_key=get_settings()['stripe_publishable_key'])
+ flash("Can't find order", 'warning')
+ return redirect(url_for('.display_ticket_stats', event_id=event_id))
+
+ @staticmethod
+ def is_order_completed(order):
+ return order.status == 'completed'
| {"golden_diff": "diff --git a/app/views/admin/models_views/ticket_sales.py b/app/views/admin/models_views/ticket_sales.py\n--- a/app/views/admin/models_views/ticket_sales.py\n+++ b/app/views/admin/models_views/ticket_sales.py\n@@ -1,6 +1,6 @@\n import flask_login\n import pycountry\n-from flask import redirect\n+from flask import redirect, flash\n from flask import request\n from flask import url_for\n from flask_admin import BaseView, expose\n@@ -111,10 +111,17 @@\n @expose('/<order_identifier>/', methods=('GET',))\n def proceed_order(self, event_id, order_identifier):\n order = TicketingManager.get_order_by_identifier(order_identifier)\n- if order.status == 'completed':\n- return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n- return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n- countries=list(pycountry.countries),\n- from_organizer=True,\n- pay_via=order.paid_via,\n- stripe_publishable_key=get_settings()['stripe_publishable_key'])\n+ if order:\n+ if self.is_order_completed(order):\n+ return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n+ return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n+ countries=list(pycountry.countries),\n+ from_organizer=True,\n+ pay_via=order.paid_via,\n+ stripe_publishable_key=get_settings()['stripe_publishable_key'])\n+ flash(\"Can't find order\", 'warning')\n+ return redirect(url_for('.display_ticket_stats', event_id=event_id))\n+\n+ @staticmethod\n+ def is_order_completed(order):\n+ return order.status == 'completed'\n", "issue": "Internal server error in promotion link\nhttps://open-event-dev.herokuapp.com/events/110/tickets/promotion/\n\n", "before_files": [{"content": "import flask_login\nimport pycountry\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin import BaseView, expose\n\nfrom app import get_settings\nfrom app.helpers.cache import cache\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.ticket import Ticket\n\nclass TicketSalesView(BaseView):\n @cache.memoize(50)\n def get_ticket(self, ticket_id):\n return Ticket.query.get(ticket_id)\n\n @expose('/')\n @flask_login.login_required\n def display_ticket_stats(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n\n completed_count = 0\n completed_amount = 0\n tickets_count = 0\n\n orders_summary = {\n 'completed': {\n 'class': 'success',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'pending': {\n 'class': 'warning',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n }\n }\n\n tickets_summary = {}\n\n for ticket in event.tickets:\n tickets_summary[str(ticket.id)] = {\n 'name': ticket.name,\n 'quantity': ticket.quantity,\n 'completed': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'pending': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'sales': 0\n }\n }\n\n for order in orders:\n if order.status == 'initialized':\n order.status = 'pending'\n orders_summary[str(order.status)]['orders_count'] += 1\n orders_summary[str(order.status)]['total_sales'] += order.amount\n for order_ticket in order.tickets:\n 
orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity\n ticket = self.get_ticket(order_ticket.ticket_id)\n tickets_summary[str(ticket.id)][str(order.status)]['tickets_count'] += order_ticket.quantity\n tickets_summary[str(ticket.id)][str(order.status)]['sales'] += order_ticket.quantity * ticket.price\n\n return self.render('/gentelella/admin/event/tickets/tickets.html', event=event, event_id=event_id,\n orders_summary=orders_summary, tickets_summary=tickets_summary)\n\n @expose('/orders/')\n @flask_login.login_required\n def display_orders(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/orders.html', event=event, event_id=event_id, orders=orders)\n\n @expose('/attendees/')\n @flask_login.login_required\n def display_attendees(self, event_id):\n event = DataGetter.get_event(event_id)\n orders = TicketingManager.get_orders(event_id)\n return self.render('/gentelella/admin/event/tickets/attendees.html', event=event,\n event_id=event_id, orders=orders)\n\n @expose('/add-order/', methods=('GET', 'POST'))\n @flask_login.login_required\n def add_order(self, event_id):\n\n if request.method == 'POST':\n order = TicketingManager.create_order(request.form, True)\n return redirect(url_for('.proceed_order', event_id=event_id, order_identifier=order.identifier))\n\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/tickets/add_order.html', event=event, event_id=event_id)\n\n @expose('/<order_identifier>/', methods=('GET',))\n def proceed_order(self, event_id, order_identifier):\n order = TicketingManager.get_order_by_identifier(order_identifier)\n if order.status == 'completed':\n return redirect(url_for('ticketing.view_order_after_payment', order_identifier=order_identifier))\n return self.render('/gentelella/guest/ticketing/order_pre_payment.html', order=order, event=order.event,\n countries=list(pycountry.countries),\n from_organizer=True,\n pay_via=order.paid_via,\n stripe_publishable_key=get_settings()['stripe_publishable_key'])\n", "path": "app/views/admin/models_views/ticket_sales.py"}]} | 1,802 | 412 |
gh_patches_debug_15073 | rasdani/github-patches | git_diff | allegro__ralph-3365 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
min_ip, max_ip fields erroring despite being specified via JSON REST API
As of commit 4794c4c04fced29b397b58a7689ff725c20ff6bd I'm having a problem where the JSON REST API won't let me create networks via https://22.0.0.2/api/networks/ (a Docker container instance of Ralph).
A request sent to that endpoint with the following body:
```json
{
"name":"test3",
"address":"2.0.0.0/8",
"remarks":"",
"vlan":1,
"dhcp_broadcast":false,
"reserved_from_beginning":0,
"reserved_from_end":0,
"gateway":null,
"network_environment":null,
"kind":null,
"service_env":null,
"terminators":[
],
"racks":[
],
"dns_servers":[
],
"min_ip":16777216,
"max_ip":33554431
}
```
produces the following result:
```json
{
"min_ip": [
"This field is required."
],
"max_ip": [
"This field is required."
]
}
```
This seems to be a known issue with Django REST Framework, possibly a result of `min_ip` and `max_ip` being set as non-editable in the model.
The same request works via the form request mechanism.
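If the non-editable fields are indeed the cause, a common Django REST Framework workaround is a separate write serializer that excludes the model-computed fields. A rough sketch following the layout of the existing module (the `save_serializer_class` hook mirrors how `IPAddressViewSet` in the same module already handles writes):

```python
from ralph.api import RalphAPISerializer
from ralph.networks.models import Network


class NetworkSaveSerializer(RalphAPISerializer):
    """Write-side serializer: min_ip/max_ip are derived from `address` by the model."""

    class Meta:
        model = Network
        depth = 1
        exclude = ('min_ip', 'max_ip')
```

The read serializer could stay unchanged; only create/update requests need the relaxed field set.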
</issue>
<code>
[start of src/ralph/networks/api.py]
1 # -*- coding: utf-8 -*-
2 from django.conf import settings
3 from rest_framework.exceptions import ValidationError
4
5 from ralph.api import RalphAPISerializer, RalphAPIViewSet, router
6 from ralph.api.serializers import RalphAPISaveSerializer
7 from ralph.assets.api.serializers import EthernetSerializer
8 from ralph.networks.models import (
9 IPAddress,
10 Network,
11 NetworkEnvironment,
12 NetworkKind
13 )
14
15
16 class NetworkEnvironmentSerializer(RalphAPISerializer):
17 class Meta:
18 model = NetworkEnvironment
19 depth = 1
20
21
22 class NetworkKindSerializer(RalphAPISerializer):
23 class Meta:
24 model = NetworkKind
25 depth = 1
26
27
28 class NetworkSimpleSerializer(RalphAPISerializer):
29 class Meta:
30 model = Network
31 fields = (
32 'id', 'url', 'name', 'remarks', 'vlan', 'dhcp_broadcast', 'parent',
33 'network_environment'
34 )
35
36
37 class NetworkSerializer(RalphAPISerializer):
38 class Meta:
39 model = Network
40 depth = 1
41
42
43 class IPAddressSerializer(RalphAPISerializer):
44 network = NetworkSimpleSerializer()
45 ethernet = EthernetSerializer()
46
47 class Meta:
48 model = IPAddress
49 depth = 1
50 exclude = ('number',)
51
52
53 class IPAddressSaveSerializer(RalphAPISaveSerializer):
54 class Meta:
55 model = IPAddress
56
57 def validate_dhcp_expose(self, value):
58 """
59 Check if dhcp_expose value has changed from True to False.
60 """
61 if (
62 settings.DHCP_ENTRY_FORBID_CHANGE and
63 self.instance and
64 self.instance.dhcp_expose and
65 not value
66 ):
67 raise ValidationError(
68 'Cannot remove entry from DHCP. Use transition to do this.'
69 )
70 return value
71
72
73 class IPAddressViewSet(RalphAPIViewSet):
74 queryset = IPAddress.objects.all()
75 serializer_class = IPAddressSerializer
76 save_serializer_class = IPAddressSaveSerializer
77 prefetch_related = [
78 'ethernet', 'ethernet__base_object', 'ethernet__base_object__tags',
79 'network',
80 ]
81 filter_fields = [
82 'hostname', 'ethernet__base_object', 'network', 'network__address',
83 'status', 'is_public', 'is_management', 'dhcp_expose', 'ethernet__mac',
84 ]
85
86 def destroy(self, request, *args, **kwargs):
87 instance = self.get_object()
88 if instance and instance.dhcp_expose:
89 raise ValidationError(
90 'Could not delete IPAddress when it is exposed in DHCP'
91 )
92 return super().destroy(request, *args, **kwargs)
93
94
95 class NetworkViewSet(RalphAPIViewSet):
96 queryset = Network.objects.all()
97 serializer_class = NetworkSerializer
98 select_related = ['network_environment', 'kind']
99 prefetch_related = ['racks']
100 extended_filter_fields = {
101 # workaround for custom field for address field defined in admin
102 'address': ['address'],
103 }
104
105
106 class NetworkEnvironmentViewSet(RalphAPIViewSet):
107 queryset = NetworkEnvironment.objects.all()
108 serializer_class = NetworkEnvironmentSerializer
109
110
111 class NetworkKindViewSet(RalphAPIViewSet):
112 queryset = NetworkKind.objects.all()
113 serializer_class = NetworkKindSerializer
114
115 router.register(r'ipaddresses', IPAddressViewSet)
116 router.register(r'networks', NetworkViewSet)
117 router.register(r'network-environments', NetworkEnvironmentViewSet)
118 router.register(r'network-kinds', NetworkKindViewSet)
119 urlpatterns = []
120
[end of src/ralph/networks/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ralph/networks/api.py b/src/ralph/networks/api.py
--- a/src/ralph/networks/api.py
+++ b/src/ralph/networks/api.py
@@ -34,6 +34,13 @@
)
+class NetworkSaveSerializer(RalphAPISerializer):
+ class Meta:
+ model = Network
+ depth = 1
+ exclude = ('min_ip', 'max_ip')
+
+
class NetworkSerializer(RalphAPISerializer):
class Meta:
model = Network
@@ -95,6 +102,7 @@
class NetworkViewSet(RalphAPIViewSet):
queryset = Network.objects.all()
serializer_class = NetworkSerializer
+ save_serializer_class = NetworkSaveSerializer
select_related = ['network_environment', 'kind']
prefetch_related = ['racks']
extended_filter_fields = {
| {"golden_diff": "diff --git a/src/ralph/networks/api.py b/src/ralph/networks/api.py\n--- a/src/ralph/networks/api.py\n+++ b/src/ralph/networks/api.py\n@@ -34,6 +34,13 @@\n )\n \n \n+class NetworkSaveSerializer(RalphAPISerializer):\n+ class Meta:\n+ model = Network\n+ depth = 1\n+ exclude = ('min_ip', 'max_ip')\n+\n+\n class NetworkSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n@@ -95,6 +102,7 @@\n class NetworkViewSet(RalphAPIViewSet):\n queryset = Network.objects.all()\n serializer_class = NetworkSerializer\n+ save_serializer_class = NetworkSaveSerializer\n select_related = ['network_environment', 'kind']\n prefetch_related = ['racks']\n extended_filter_fields = {\n", "issue": "min_ip, max_ip fields erroring despite being specified via JSON REST API\nAs of commit 4794c4c04fced29b397b58a7689ff725c20ff6bd I'm having a problem where the JSON rest API won't let me create networks - https://22.0.0.2/api/networks/ (docker container instance of Ralph)\r\na request sent to \r\n```json\r\n{\r\n \"name\":\"test3\",\r\n \"address\":\"2.0.0.0/8\",\r\n \"remarks\":\"\",\r\n \"vlan\":1,\r\n \"dhcp_broadcast\":false,\r\n \"reserved_from_beginning\":0,\r\n \"reserved_from_end\":0,\r\n \"gateway\":null,\r\n \"network_environment\":null,\r\n \"kind\":null,\r\n \"service_env\":null,\r\n \"terminators\":[\r\n\r\n ],\r\n \"racks\":[\r\n\r\n ],\r\n \"dns_servers\":[\r\n\r\n ],\r\n \"min_ip\":16777216,\r\n \"max_ip\":33554431\r\n}\r\n```\r\nproduces the following result:\r\n```json\r\n{\r\n \"min_ip\": [\r\n \"This field is required.\"\r\n ],\r\n \"max_ip\": [\r\n \"This field is required.\"\r\n ]\r\n}\r\n```\r\n\r\nThis seems to be a known issue with Django REST API, and is possibly a result of min_ip and max_ip being set as non-editable in the model? \r\n\r\nThe same request works via the form request mechanism.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom rest_framework.exceptions import ValidationError\n\nfrom ralph.api import RalphAPISerializer, RalphAPIViewSet, router\nfrom ralph.api.serializers import RalphAPISaveSerializer\nfrom ralph.assets.api.serializers import EthernetSerializer\nfrom ralph.networks.models import (\n IPAddress,\n Network,\n NetworkEnvironment,\n NetworkKind\n)\n\n\nclass NetworkEnvironmentSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkEnvironment\n depth = 1\n\n\nclass NetworkKindSerializer(RalphAPISerializer):\n class Meta:\n model = NetworkKind\n depth = 1\n\n\nclass NetworkSimpleSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n fields = (\n 'id', 'url', 'name', 'remarks', 'vlan', 'dhcp_broadcast', 'parent',\n 'network_environment'\n )\n\n\nclass NetworkSerializer(RalphAPISerializer):\n class Meta:\n model = Network\n depth = 1\n\n\nclass IPAddressSerializer(RalphAPISerializer):\n network = NetworkSimpleSerializer()\n ethernet = EthernetSerializer()\n\n class Meta:\n model = IPAddress\n depth = 1\n exclude = ('number',)\n\n\nclass IPAddressSaveSerializer(RalphAPISaveSerializer):\n class Meta:\n model = IPAddress\n\n def validate_dhcp_expose(self, value):\n \"\"\"\n Check if dhcp_expose value has changed from True to False.\n \"\"\"\n if (\n settings.DHCP_ENTRY_FORBID_CHANGE and\n self.instance and\n self.instance.dhcp_expose and\n not value\n ):\n raise ValidationError(\n 'Cannot remove entry from DHCP. 
Use transition to do this.'\n )\n return value\n\n\nclass IPAddressViewSet(RalphAPIViewSet):\n queryset = IPAddress.objects.all()\n serializer_class = IPAddressSerializer\n save_serializer_class = IPAddressSaveSerializer\n prefetch_related = [\n 'ethernet', 'ethernet__base_object', 'ethernet__base_object__tags',\n 'network',\n ]\n filter_fields = [\n 'hostname', 'ethernet__base_object', 'network', 'network__address',\n 'status', 'is_public', 'is_management', 'dhcp_expose', 'ethernet__mac',\n ]\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if instance and instance.dhcp_expose:\n raise ValidationError(\n 'Could not delete IPAddress when it is exposed in DHCP'\n )\n return super().destroy(request, *args, **kwargs)\n\n\nclass NetworkViewSet(RalphAPIViewSet):\n queryset = Network.objects.all()\n serializer_class = NetworkSerializer\n select_related = ['network_environment', 'kind']\n prefetch_related = ['racks']\n extended_filter_fields = {\n # workaround for custom field for address field defined in admin\n 'address': ['address'],\n }\n\n\nclass NetworkEnvironmentViewSet(RalphAPIViewSet):\n queryset = NetworkEnvironment.objects.all()\n serializer_class = NetworkEnvironmentSerializer\n\n\nclass NetworkKindViewSet(RalphAPIViewSet):\n queryset = NetworkKind.objects.all()\n serializer_class = NetworkKindSerializer\n\nrouter.register(r'ipaddresses', IPAddressViewSet)\nrouter.register(r'networks', NetworkViewSet)\nrouter.register(r'network-environments', NetworkEnvironmentViewSet)\nrouter.register(r'network-kinds', NetworkKindViewSet)\nurlpatterns = []\n", "path": "src/ralph/networks/api.py"}]} | 1,849 | 192 |
gh_patches_debug_24344 | rasdani/github-patches | git_diff | getnikola__nikola-1068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nikola auto not working
I'm a newb so suspect user error, but I installed nikola v6.3.0 (pip install nikola) and have not been able to get **nikola auto** to work.
Initially I was getting errors due to a more recent version of livereload (which, isn't this marked as fixed?) and resolved them by forcing livereload=2.0.0. 
The next problem is that the site appears not to rebuild when I save edits to a post. I have not RTFM yet (as instructed :) but when I do:
nikola auto -b
I should be able to edit a post file (e.g. 1.rst) and on save, see the changes immediately, correct? What I see in the server log output and in Chrome dev tools shows that the browser is re-requesting a bunch of files, but I have to manually do **nikola build** to get the site to rebuild.
I'm using OS X 10.9.1, Python 2.7. Let me know if I can provide any other info; if I get it worked out I'll update the issue.
</issue>
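For context, livereload's `Server.watch()` accepts an optional second argument, a shell command or callable to run when the watched path changes; without it the server only pushes a browser refresh, which matches the symptom described above. A minimal sketch of the expected wiring (the exact signatures are assumptions based on the plugin code and the reference patch further down in this record):

```python
# Illustrative sketch: attach a rebuild command to each watched path so that
# saving a post runs `nikola build` before the browser is refreshed.
from livereload import Server

server = Server()
server.watch('posts/', 'nikola build')    # second argument: command to run on change
server.watch('conf.py', 'nikola build')
server.serve(8000, None, 'output')        # mirrors the plugin's serve(port, None, out_folder)
```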
<code>
[start of nikola/plugins/command/auto.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function, unicode_literals
28
29 import os
30 import subprocess
31 import webbrowser
32
33 from nikola.plugin_categories import Command
34 from nikola.utils import req_missing
35
36
37 class CommandAuto(Command):
38 """Start debugging console."""
39 name = "auto"
40 doc_purpose = "automatically detect site changes, rebuild and optionally refresh a browser"
41 cmd_options = [
42 {
43 'name': 'browser',
44 'short': 'b',
45 'type': bool,
46 'help': 'Start a web browser.',
47 'default': False,
48 },
49 {
50 'name': 'port',
51 'short': 'p',
52 'long': 'port',
53 'default': 8000,
54 'type': int,
55 'help': 'Port nummber (default: 8000)',
56 },
57 ]
58
59 def _execute(self, options, args):
60 """Start the watcher."""
61 try:
62 from livereload import Server
63 except ImportError:
64 req_missing(['livereload>=2.0.0'], 'use the "auto" command')
65 return
66
67 # Run an initial build so we are uptodate
68 subprocess.call(("nikola", "build"))
69
70 port = options and options.get('port')
71
72 server = Server()
73 server.watch('conf.py')
74 server.watch('themes/')
75 server.watch('templates/')
76 server.watch(self.site.config['GALLERY_PATH'])
77 for item in self.site.config['post_pages']:
78 server.watch(os.path.dirname(item[0]))
79 for item in self.site.config['FILES_FOLDERS']:
80 server.watch(os.path.dirname(item))
81
82 out_folder = self.site.config['OUTPUT_FOLDER']
83 if options and options.get('browser'):
84 webbrowser.open('http://localhost:{0}'.format(port))
85
86 server.serve(port, None, out_folder)
87
[end of nikola/plugins/command/auto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/command/auto.py b/nikola/plugins/command/auto.py
--- a/nikola/plugins/command/auto.py
+++ b/nikola/plugins/command/auto.py
@@ -64,20 +64,20 @@
req_missing(['livereload>=2.0.0'], 'use the "auto" command')
return
- # Run an initial build so we are uptodate
+ # Run an initial build so we are up-to-date
subprocess.call(("nikola", "build"))
port = options and options.get('port')
server = Server()
- server.watch('conf.py')
- server.watch('themes/')
- server.watch('templates/')
+ server.watch('conf.py', 'nikola build')
+ server.watch('themes/', 'nikola build')
+ server.watch('templates/', 'nikola build')
server.watch(self.site.config['GALLERY_PATH'])
for item in self.site.config['post_pages']:
- server.watch(os.path.dirname(item[0]))
+ server.watch(os.path.dirname(item[0]), 'nikola build')
for item in self.site.config['FILES_FOLDERS']:
- server.watch(os.path.dirname(item))
+ server.watch(os.path.dirname(item), 'nikola build')
out_folder = self.site.config['OUTPUT_FOLDER']
if options and options.get('browser'):
| {"golden_diff": "diff --git a/nikola/plugins/command/auto.py b/nikola/plugins/command/auto.py\n--- a/nikola/plugins/command/auto.py\n+++ b/nikola/plugins/command/auto.py\n@@ -64,20 +64,20 @@\n req_missing(['livereload>=2.0.0'], 'use the \"auto\" command')\n return\n \n- # Run an initial build so we are uptodate\n+ # Run an initial build so we are up-to-date\n subprocess.call((\"nikola\", \"build\"))\n \n port = options and options.get('port')\n \n server = Server()\n- server.watch('conf.py')\n- server.watch('themes/')\n- server.watch('templates/')\n+ server.watch('conf.py', 'nikola build')\n+ server.watch('themes/', 'nikola build')\n+ server.watch('templates/', 'nikola build')\n server.watch(self.site.config['GALLERY_PATH'])\n for item in self.site.config['post_pages']:\n- server.watch(os.path.dirname(item[0]))\n+ server.watch(os.path.dirname(item[0]), 'nikola build')\n for item in self.site.config['FILES_FOLDERS']:\n- server.watch(os.path.dirname(item))\n+ server.watch(os.path.dirname(item), 'nikola build')\n \n out_folder = self.site.config['OUTPUT_FOLDER']\n if options and options.get('browser'):\n", "issue": "nikola auto not working \nI'm a newb so suspect user error, but I installed nikola v6.3.0 (pip install nikola) and have not been able to get **nikola auto** to work.\n\nInitially I was getting errors due to a more recent version of livereload (which, isn't this marked as fixed?) and resolved by forcing livereload=2.0.0. \n\nThe next problem is that the site appears not to rebuild when I save edits to a post. I have not RTFM yet (as instructed :) but when I do:\n nikola auto -b \n\nI should be able to edit a post file (e.g. 1.rst) and on save, see the changes immediately, correct? What I see in the server log output and in Chrome dev tools shows that the browser is re-requesting a bunch of files, but I have to manually do **nikola build** to get the site to rebuild. \n\nI'm using OS X 10.9.1, Python 2.7. Let me know if I can provide any other info; if I get it worked out I'll update the issue. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport subprocess\nimport webbrowser\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import req_missing\n\n\nclass CommandAuto(Command):\n \"\"\"Start debugging console.\"\"\"\n name = \"auto\"\n doc_purpose = \"automatically detect site changes, rebuild and optionally refresh a browser\"\n cmd_options = [\n {\n 'name': 'browser',\n 'short': 'b',\n 'type': bool,\n 'help': 'Start a web browser.',\n 'default': False,\n },\n {\n 'name': 'port',\n 'short': 'p',\n 'long': 'port',\n 'default': 8000,\n 'type': int,\n 'help': 'Port nummber (default: 8000)',\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Start the watcher.\"\"\"\n try:\n from livereload import Server\n except ImportError:\n req_missing(['livereload>=2.0.0'], 'use the \"auto\" command')\n return\n\n # Run an initial build so we are uptodate\n subprocess.call((\"nikola\", \"build\"))\n\n port = options and options.get('port')\n\n server = Server()\n server.watch('conf.py')\n server.watch('themes/')\n server.watch('templates/')\n server.watch(self.site.config['GALLERY_PATH'])\n for item in self.site.config['post_pages']:\n server.watch(os.path.dirname(item[0]))\n for item in self.site.config['FILES_FOLDERS']:\n server.watch(os.path.dirname(item))\n\n out_folder = self.site.config['OUTPUT_FOLDER']\n if options and options.get('browser'):\n webbrowser.open('http://localhost:{0}'.format(port))\n\n server.serve(port, None, out_folder)\n", "path": "nikola/plugins/command/auto.py"}]} | 1,603 | 299 |
gh_patches_debug_39039 | rasdani/github-patches | git_diff | Mailu__Mailu-1392 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
certwatcher.py is not using TLS_CERT_FILENAME nor TLS_KEYPAIR_FILENAME
certwatcher.py is assuming that the certificate and the key are in /certs and named respectively cert.pem and key.pem
However, Mailu offer two environment variables to allow specific path & filename for the certificate and the key which are used in config.py:
TLS_CERT_FILENAME
TLS_KEYPAIR_FILENAME
I would add that in my use case (https-portal as a reverse proxy), those files are not directly in /certs. My environment variables look like:
TLS_CERT_FILENAME=mail.example.net/production/signed.crt
TLS_KEYPAIR_FILENAME=mail.example.net/production/domain.key
Currently, certwatcher.py is monitoring `/certs`, whereas I would need to monitor
`/certs/mail.example.net/production`
</issue>
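The change suggested by the report is essentially to resolve the two environment variables against /certs and watch the directories that actually contain the files. A rough sketch of that resolution step (the variable names and defaults come from the issue; the rest is illustrative):

```python
# Illustrative sketch: derive the paths to watch from the same variables
# that config.py reads, instead of hard-coding /certs/cert.pem and key.pem.
from os import getenv
from os.path import join, split

cert_path = join("/certs/", getenv("TLS_CERT_FILENAME", default="cert.pem"))
keypair_path = join("/certs/", getenv("TLS_KEYPAIR_FILENAME", default="key.pem"))

# Watch the containing directories, e.g. /certs/mail.example.net/production
cert_dir = split(cert_path)[0]
keypair_dir = split(keypair_path)[0]
```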
<code>
[start of core/nginx/certwatcher.py]
1 #!/usr/bin/python3
2 """
3 Certificate watcher which reloads nginx or reconfigures it, depending on what
4 happens to externally supplied certificates. Only executed by start.py in case
5 of TLS_FLAVOR=[mail, cert]
6 """
7
8 from os.path import exists, split as path_split
9 from os import system
10 import time
11 from watchdog.observers.polling import PollingObserver
12 from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
13 FileCreatedEvent, FileModifiedEvent, FileMovedEvent
14
15 class ChangeHandler(FileSystemEventHandler):
16 "watchdog-handler listening on any event, executing the correct configuration/reload steps"
17 @staticmethod
18 def reload_nginx():
19 "merely reload nginx without re-configuring everything"
20 if exists("/var/run/nginx.pid"):
21 print("Reloading a running nginx")
22 system("nginx -s reload")
23
24 @staticmethod
25 def reexec_config():
26 "execute a reconfiguration of the system, which also reloads"
27 print("Reconfiguring system")
28 system("/config.py")
29
30 def on_any_event(self, event):
31 "event-listener checking if the affected files are the cert-files we're interested in"
32 if event.is_directory:
33 return
34
35 filename = path_split(event.src_path)[-1]
36 if isinstance(event, FileMovedEvent):
37 filename = path_split(event.dest_path)[-1]
38
39 if filename in ['cert.pem', 'key.pem']:
40 # all cases except for FileModified need re-configure
41 if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
42 ChangeHandler.reexec_config()
43 # file modification needs only a nginx reload without config.py
44 elif isinstance(event, FileModifiedEvent):
45 ChangeHandler.reload_nginx()
46 # cert files have been moved away, re-configure
47 elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
48 ChangeHandler.reexec_config()
49
50
51 if __name__ == '__main__':
52 observer = PollingObserver()
53 handler = ChangeHandler()
54 observer.schedule(handler, "/certs", recursive=False)
55 observer.start()
56
57 try:
58 while True:
59 time.sleep(1)
60 except KeyboardInterrupt:
61 observer.stop()
62
63 observer.join()
64
[end of core/nginx/certwatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py
--- a/core/nginx/certwatcher.py
+++ b/core/nginx/certwatcher.py
@@ -5,8 +5,8 @@
of TLS_FLAVOR=[mail, cert]
"""
-from os.path import exists, split as path_split
-from os import system
+from os.path import exists, split as path_split, join as path_join
+from os import system, getenv
import time
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
@@ -14,6 +14,13 @@
class ChangeHandler(FileSystemEventHandler):
"watchdog-handler listening on any event, executing the correct configuration/reload steps"
+
+ def __init__(self, cert_path, keypair_path):
+ "Initialize a new changehandler"""
+ super().__init__()
+ self.cert_path = cert_path
+ self.keypair_path = keypair_path
+
@staticmethod
def reload_nginx():
"merely reload nginx without re-configuring everything"
@@ -32,11 +39,11 @@
if event.is_directory:
return
- filename = path_split(event.src_path)[-1]
+ filename = event.src_path
if isinstance(event, FileMovedEvent):
- filename = path_split(event.dest_path)[-1]
+ filename = event.dest_path
- if filename in ['cert.pem', 'key.pem']:
+ if filename in [self.cert_path, self.keypair_path]:
# all cases except for FileModified need re-configure
if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
ChangeHandler.reexec_config()
@@ -44,14 +51,21 @@
elif isinstance(event, FileModifiedEvent):
ChangeHandler.reload_nginx()
# cert files have been moved away, re-configure
- elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
+ elif isinstance(event, FileMovedEvent) and event.src_path in [self.cert_path, self.keypair_path]:
ChangeHandler.reexec_config()
if __name__ == '__main__':
+ cert_path = path_join("/certs/", getenv("TLS_CERT_FILENAME", default="cert.pem"))
+ cert_dir = path_split(cert_path)[0]
+ keypair_path = path_join("/certs/", getenv("TLS_KEYPAIR_FILENAME", default="key.pem"))
+ keypair_dir = path_split(keypair_path)[0]
+
observer = PollingObserver()
- handler = ChangeHandler()
- observer.schedule(handler, "/certs", recursive=False)
+ handler = ChangeHandler(cert_path, keypair_path)
+ observer.schedule(handler, cert_dir, recursive=False)
+ if keypair_dir != cert_dir:
+ observer.schedule(handler, keypair_dir, recursive=False)
observer.start()
try:
| {"golden_diff": "diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py\n--- a/core/nginx/certwatcher.py\n+++ b/core/nginx/certwatcher.py\n@@ -5,8 +5,8 @@\n of TLS_FLAVOR=[mail, cert]\n \"\"\"\n \n-from os.path import exists, split as path_split\n-from os import system\n+from os.path import exists, split as path_split, join as path_join\n+from os import system, getenv\n import time\n from watchdog.observers.polling import PollingObserver\n from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n@@ -14,6 +14,13 @@\n \n class ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n+\n+ def __init__(self, cert_path, keypair_path):\n+ \"Initialize a new changehandler\"\"\"\n+ super().__init__()\n+ self.cert_path = cert_path\n+ self.keypair_path = keypair_path\n+\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n@@ -32,11 +39,11 @@\n if event.is_directory:\n return\n \n- filename = path_split(event.src_path)[-1]\n+ filename = event.src_path\n if isinstance(event, FileMovedEvent):\n- filename = path_split(event.dest_path)[-1]\n+ filename = event.dest_path\n \n- if filename in ['cert.pem', 'key.pem']:\n+ if filename in [self.cert_path, self.keypair_path]:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n@@ -44,14 +51,21 @@\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n- elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:\n+ elif isinstance(event, FileMovedEvent) and event.src_path in [self.cert_path, self.keypair_path]:\n ChangeHandler.reexec_config()\n \n \n if __name__ == '__main__':\n+ cert_path = path_join(\"/certs/\", getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\"))\n+ cert_dir = path_split(cert_path)[0]\n+ keypair_path = path_join(\"/certs/\", getenv(\"TLS_KEYPAIR_FILENAME\", default=\"key.pem\"))\n+ keypair_dir = path_split(keypair_path)[0]\n+\n observer = PollingObserver()\n- handler = ChangeHandler()\n- observer.schedule(handler, \"/certs\", recursive=False)\n+ handler = ChangeHandler(cert_path, keypair_path)\n+ observer.schedule(handler, cert_dir, recursive=False)\n+ if keypair_dir != cert_dir:\n+ observer.schedule(handler, keypair_dir, recursive=False)\n observer.start()\n \n try:\n", "issue": "certwatcher.py is not using TLS_CERT_FILENAME nor TLS_KEYPAIR_FILENAME\ncertwatcher.py is assuming that the certificate and the key are in /certs and named respectively cert.pem and key.pem\r\n\r\nHowever, Mailu offer two environment variables to allow specific path & filename for the certificate and the key which are used in config.py:\r\nTLS_CERT_FILENAME\r\nTLS_KEYPAIR_FILENAME\r\n\r\nI would add that in my use case (https-portal as a reverse proxy), those files are not directly in /certs. My environment variables look like:\r\nTLS_CERT_FILENAME=mail.example.net/production/signed.crt\r\nTLS_KEYPAIR_FILENAME=mail.example.net/production/domain.key\r\nCurrently, certwatcher.py is monitoring `/certs`, whereas I would need to monitor \r\n`/certs/mail.example.net/production`\n", "before_files": [{"content": "#!/usr/bin/python3\n\"\"\"\nCertificate watcher which reloads nginx or reconfigures it, depending on what\nhappens to externally supplied certificates. 
Only executed by start.py in case\nof TLS_FLAVOR=[mail, cert]\n\"\"\"\n\nfrom os.path import exists, split as path_split\nfrom os import system\nimport time\nfrom watchdog.observers.polling import PollingObserver\nfrom watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n FileCreatedEvent, FileModifiedEvent, FileMovedEvent\n\nclass ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n if exists(\"/var/run/nginx.pid\"):\n print(\"Reloading a running nginx\")\n system(\"nginx -s reload\")\n\n @staticmethod\n def reexec_config():\n \"execute a reconfiguration of the system, which also reloads\"\n print(\"Reconfiguring system\")\n system(\"/config.py\")\n\n def on_any_event(self, event):\n \"event-listener checking if the affected files are the cert-files we're interested in\"\n if event.is_directory:\n return\n\n filename = path_split(event.src_path)[-1]\n if isinstance(event, FileMovedEvent):\n filename = path_split(event.dest_path)[-1]\n\n if filename in ['cert.pem', 'key.pem']:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n # file modification needs only a nginx reload without config.py\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:\n ChangeHandler.reexec_config()\n\n\nif __name__ == '__main__':\n observer = PollingObserver()\n handler = ChangeHandler()\n observer.schedule(handler, \"/certs\", recursive=False)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n", "path": "core/nginx/certwatcher.py"}]} | 1,307 | 648 |
gh_patches_debug_5433 | rasdani/github-patches | git_diff | open-mmlab__mmocr-221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the default ignored class -100 in SDMGRLoss
In most cases, class 0 should not be ignored.
ref https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/kie/losses/sdmgr_loss.py#L17
</issue>
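-100 is also the stock default for `ignore_index` in `torch.nn.CrossEntropyLoss`, so it can never collide with a real class id, whereas `ignore_index=0` silently drops every node labelled with class 0. A small, self-contained illustration with hypothetical tensors:

```python
import torch
from torch import nn

logits = torch.randn(4, 3)            # 4 nodes, 3 classes
labels = torch.tensor([0, 1, 2, 0])   # class 0 is a legitimate label here

# ignore_index=0 computes the loss from only two of the four nodes...
loss_ignoring_zero = nn.CrossEntropyLoss(ignore_index=0)(logits, labels)
# ...while the conventional -100 keeps every labelled node in the loss.
loss_default = nn.CrossEntropyLoss(ignore_index=-100)(logits, labels)
```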
<code>
[start of mmocr/models/kie/losses/sdmgr_loss.py]
1 import torch
2 from torch import nn
3
4 from mmdet.models.builder import LOSSES
5 from mmdet.models.losses import accuracy
6
7
8 @LOSSES.register_module()
9 class SDMGRLoss(nn.Module):
10 """The implementation the loss of key information extraction proposed in
11 the paper: Spatial Dual-Modality Graph Reasoning for Key Information
12 Extraction.
13
14 https://arxiv.org/abs/2103.14470.
15 """
16
17 def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):
18 super().__init__()
19 self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)
20 self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)
21 self.node_weight = node_weight
22 self.edge_weight = edge_weight
23 self.ignore = ignore
24
25 def forward(self, node_preds, edge_preds, gts):
26 node_gts, edge_gts = [], []
27 for gt in gts:
28 node_gts.append(gt[:, 0])
29 edge_gts.append(gt[:, 1:].contiguous().view(-1))
30 node_gts = torch.cat(node_gts).long()
31 edge_gts = torch.cat(edge_gts).long()
32
33 node_valids = torch.nonzero(node_gts != self.ignore).view(-1)
34 edge_valids = torch.nonzero(edge_gts != -1).view(-1)
35 return dict(
36 loss_node=self.node_weight * self.loss_node(node_preds, node_gts),
37 loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts),
38 acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]),
39 acc_edge=accuracy(edge_preds[edge_valids], edge_gts[edge_valids]))
40
[end of mmocr/models/kie/losses/sdmgr_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmocr/models/kie/losses/sdmgr_loss.py b/mmocr/models/kie/losses/sdmgr_loss.py
--- a/mmocr/models/kie/losses/sdmgr_loss.py
+++ b/mmocr/models/kie/losses/sdmgr_loss.py
@@ -14,7 +14,7 @@
https://arxiv.org/abs/2103.14470.
"""
- def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):
+ def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100):
super().__init__()
self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)
self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)
| {"golden_diff": "diff --git a/mmocr/models/kie/losses/sdmgr_loss.py b/mmocr/models/kie/losses/sdmgr_loss.py\n--- a/mmocr/models/kie/losses/sdmgr_loss.py\n+++ b/mmocr/models/kie/losses/sdmgr_loss.py\n@@ -14,7 +14,7 @@\n https://arxiv.org/abs/2103.14470.\n \"\"\"\n \n- def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):\n+ def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100):\n super().__init__()\n self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)\n self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)\n", "issue": "Make default ingnored class to -100 in SDMGRLoss\nIn most cases, class 0 should not be ignored.\r\n\r\nref https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/kie/losses/sdmgr_loss.py#L17\n", "before_files": [{"content": "import torch\nfrom torch import nn\n\nfrom mmdet.models.builder import LOSSES\nfrom mmdet.models.losses import accuracy\n\n\[email protected]_module()\nclass SDMGRLoss(nn.Module):\n \"\"\"The implementation the loss of key information extraction proposed in\n the paper: Spatial Dual-Modality Graph Reasoning for Key Information\n Extraction.\n\n https://arxiv.org/abs/2103.14470.\n \"\"\"\n\n def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=0):\n super().__init__()\n self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore)\n self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1)\n self.node_weight = node_weight\n self.edge_weight = edge_weight\n self.ignore = ignore\n\n def forward(self, node_preds, edge_preds, gts):\n node_gts, edge_gts = [], []\n for gt in gts:\n node_gts.append(gt[:, 0])\n edge_gts.append(gt[:, 1:].contiguous().view(-1))\n node_gts = torch.cat(node_gts).long()\n edge_gts = torch.cat(edge_gts).long()\n\n node_valids = torch.nonzero(node_gts != self.ignore).view(-1)\n edge_valids = torch.nonzero(edge_gts != -1).view(-1)\n return dict(\n loss_node=self.node_weight * self.loss_node(node_preds, node_gts),\n loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts),\n acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]),\n acc_edge=accuracy(edge_preds[edge_valids], edge_gts[edge_valids]))\n", "path": "mmocr/models/kie/losses/sdmgr_loss.py"}]} | 1,063 | 187 |
gh_patches_debug_23961 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feat] convert query to string using new mindsdb_sql feature
Since [that work](https://github.com/mindsdb/mindsdb_sql/issues/130) is finished, we can add these changes to mindsdb, so we can delete the dirty fix for escape symbols, which should prevent other hidden errors.
</issue>
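The linked mindsdb_sql work adds a renderer that can serialize a parsed AST back into a dialect-specific string, which is what replaces the backtick-stripping workaround. A minimal sketch following the API used in the reference patch later in this record (illustrative, not the exact final code):

```python
# Illustrative sketch: render the parsed AST to a PostgreSQL-flavoured string
# for DuckDB instead of patching backticks out of the MySQL-style repr.
from mindsdb_sql import parse_sql
from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender

def to_postgres_sql(query_text):
    query = parse_sql(query_text, dialect='mysql')
    render = SqlalchemyRender('postgres')
    try:
        return render.get_string(query, with_failback=False)
    except Exception:
        # best-effort rendering if the strict path fails
        return render.get_string(query, with_failback=True)
```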
<code>
[start of mindsdb/api/mysql/mysql_proxy/utilities/sql.py]
1 import duckdb
2 import pandas as pd
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy
5
6
7 def _remove_table_name(root):
8 if isinstance(root, BinaryOperation):
9 _remove_table_name(root.args[0])
10 _remove_table_name(root.args[1])
11 elif isinstance(root, Identifier):
12 root.parts = [root.parts[-1]]
13
14
15 def query_df(df, query):
16 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.
17
18 Args:
19 df (pandas.DataFrame): data
20 query (mindsdb_sql.parser.ast.Select | str): select query
21
22 Returns:
23 pandas.DataFrame
24 """
25
26 query = parse_sql(str(query), dialect='mysql')
27 if isinstance(query, Select) is False or isinstance(query.from_table, Identifier) is False:
28 raise Exception("Only 'SELECT from TABLE' statements supported for internal query")
29
30 query.from_table.parts = ['df_table']
31 for identifier in query.targets:
32 if isinstance(identifier, Identifier):
33 identifier.parts = [identifier.parts[-1]]
34 if isinstance(query.order_by, list):
35 for orderby in query.order_by:
36 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):
37 orderby.field.parts = [orderby.field.parts[-1]]
38 _remove_table_name(query.where)
39
40 # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130
41 # we need way to dump suery in postgres dialect
42 sql_query = str(query).replace('`', '')
43 res = duckdb.query_df(df, 'df_table', sql_query)
44 result_df = res.df()
45 result_df = result_df.where(pd.notnull(result_df), None)
46 return result_df
47
[end of mindsdb/api/mysql/mysql_proxy/utilities/sql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
@@ -2,6 +2,9 @@
import pandas as pd
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+
+from mindsdb.utilities.log import log
def _remove_table_name(root):
@@ -37,10 +40,14 @@
orderby.field.parts = [orderby.field.parts[-1]]
_remove_table_name(query.where)
- # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130
- # we need way to dump suery in postgres dialect
- sql_query = str(query).replace('`', '')
- res = duckdb.query_df(df, 'df_table', sql_query)
+ render = SqlalchemyRender('postgres')
+ try:
+ query_str = render.get_string(query, with_failback=False)
+ except Exception as e:
+ log.error(f"Exception during query casting to 'postgres' dialect. Query: {query}. Error: {e}")
+ query_str = render.get_string(query, with_failback=True)
+
+ res = duckdb.query_df(df, 'df_table', query_str)
result_df = res.df()
result_df = result_df.where(pd.notnull(result_df), None)
return result_df
| {"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -2,6 +2,9 @@\n import pandas as pd\n from mindsdb_sql import parse_sql\n from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\n+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n+\n+from mindsdb.utilities.log import log\n \n \n def _remove_table_name(root):\n@@ -37,10 +40,14 @@\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query.where)\n \n- # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130\n- # we need way to dump suery in postgres dialect\n- sql_query = str(query).replace('`', '')\n- res = duckdb.query_df(df, 'df_table', sql_query)\n+ render = SqlalchemyRender('postgres')\n+ try:\n+ query_str = render.get_string(query, with_failback=False)\n+ except Exception as e:\n+ log.error(f\"Exception during query casting to 'postgres' dialect. Query: {query}. Error: {e}\")\n+ query_str = render.get_string(query, with_failback=True)\n+\n+ res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "issue": "[Feat] convert query to string using new mindsdb_sql feature\nSince [that works](https://github.com/mindsdb/mindsdb_sql/issues/130) is finished, we can add this changes to mindsdb. So ve can delete dirtyfix for escape symbols and that should prevent from other hidden errors.\r\n\n", "before_files": [{"content": "import duckdb\nimport pandas as pd\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n query = parse_sql(str(query), dialect='mysql')\n if isinstance(query, Select) is False or isinstance(query.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query.from_table.parts = ['df_table']\n for identifier in query.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query.order_by, list):\n for orderby in query.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query.where)\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/130\n # we need way to dump suery in postgres dialect\n sql_query = str(query).replace('`', '')\n res = duckdb.query_df(df, 'df_table', sql_query)\n result_df = res.df()\n result_df = result_df.where(pd.notnull(result_df), None)\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]} | 1,083 | 351 |
gh_patches_debug_30403 | rasdani/github-patches | git_diff | pytorch__pytorch-2645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in legacy padding layer
I've found a bug in [Padding.py](https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/Padding.py).
It doesn't handle the original Torch Padding layer's nInputDim parameter. As far as I've understood, you've rewritten the Lua code from [here](https://github.com/torch/nn/blob/master/Padding.lua), but not fully: line 19 of that file contains the code for dealing with the nInputDim parameter.
To reproduce the issue, download the ENet model from [here](https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa) (the original model from the ENet authors) and run inference:
`import numpy as np`
`import torch`
`from torch.utils.serialization import load_lua`
`image = np.ones((1,3,1024,2048))`
`tensor = torch.FloatTensor(image)`
`net_torch = load_lua(torch_model)`
`out_torch = net_torch.forward(tensor)`
Now I get an exception:
Traceback (most recent call last):
```
File "/hdd/PycharmProjects/untitled/test.py", line 110, in <module>
out_torch = net_torch.forward(tensor).numpy()
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Module.py", line 33, in forward
return self.updateOutput(input)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py", line 36, in updateOutput
currentOutput = module.updateOutput(currentOutput)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py", line 36, in updateOutput
currentOutput = module.updateOutput(currentOutput)
File "/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/CAddTable.py", line 20, in updateOutput
self.output.add_(input[i])
RuntimeError: inconsistent tensor size at /b/wheel/pytorch-src/torch/lib/TH/generic/THTensorMath.c:827
```
Padding is added to the wrong axis, which is why CAddTable can't sum blobs with different shapes and throws an exception.
If I edit Padding.py by adding one to the self.dim variable (lines 21 and 23 in Padding.py), everything works correctly.
</issue>
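In the Lua layer, nInputDim records how many dimensions a single non-batched sample has; when the incoming tensor has more dimensions than that, the layer pads along dim + 1 to skip the batch axis. A condensed sketch of the missing check (it mirrors the reference patch rendered later in this record and is illustrative only):

```python
# Illustrative sketch of the adjustment both updateOutput and updateGradInput
# need when the layer was built with nInputDim and then receives a batch.
def effective_pad_dim(dim, n_input_dim, input_tensor):
    # e.g. nInputDim=3 with a 4D (N, C, H, W) input: pad along dim + 1,
    # skipping the batch axis, as the Lua layer does.
    if n_input_dim > 0 and input_tensor.dim() != n_input_dim:
        return dim + 1
    return dim
```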
<code>
[start of torch/legacy/nn/Padding.py]
1 import torch
2 from .Module import Module
3
4
5 class Padding(Module):
6 # pad puts in [pad] amount of [value] over dimension [dim], starting at
7 # index [index] in that dimension. If pad<0, index counts from the left.
8 # If pad>0 index counts from the right index = 1 pads before index 1.
9 # index = 2 pads starting before index 2 and after index 1 in dimension [dim]
10
11 def __init__(self, dim, pad, value=0, index=0):
12 self.value = value
13 self.index = index
14 self.dim = dim
15 self.pad = pad
16 self.outputSize = torch.Size()
17 super(Padding, self).__init__()
18
19 def updateOutput(self, input):
20 outputSize = list(input.size())
21 outputSize[self.dim] += abs(self.pad)
22 self.outputSize = torch.Size(outputSize)
23 dim = self.dim
24
25 self.output.resize_(self.outputSize)
26 self.output.fill_(self.value)
27 index = self.index
28 pad = self.pad
29 if pad > 0:
30 index = input.size(dim) - index
31 else:
32 pad = -pad
33
34 if index == 0:
35 self.output.narrow(dim, pad, input.size(dim)).copy_(input)
36 elif index == input.size(dim):
37 self.output.narrow(dim, 0, input.size(dim)).copy_(input)
38 else:
39 self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))
40 self.output.narrow(dim, index + pad, input.size(dim) -
41 index).copy_(input.narrow(dim, index, input.size(dim) - index))
42
43 return self.output
44
45 def updateGradInput(self, input, gradOutput):
46 self.gradInput.resize_as_(input)
47 dim = self.dim
48
49 index = self.index
50 pad = self.pad
51 if pad > 0:
52 index = input.size(dim) - index
53 else:
54 pad = -pad
55
56 if index == 0:
57 self.gradInput.copy_(gradOutput.narrow(dim, pad, input.size(dim)))
58 elif index == input.size(dim):
59 self.gradInput.copy_(gradOutput.narrow(dim, 0, input.size(dim)))
60 else:
61 self.gradInput.narrow(dim, 0, index).copy_(gradOutput.narrow(dim, 0, index))
62 self.gradInput.narrow(dim, index, input.size(
63 dim) - index).copy_(gradOutput.narrow(dim, index + pad, input.size(dim) - index))
64
65 return self.gradInput
66
[end of torch/legacy/nn/Padding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/legacy/nn/Padding.py b/torch/legacy/nn/Padding.py
--- a/torch/legacy/nn/Padding.py
+++ b/torch/legacy/nn/Padding.py
@@ -7,12 +7,15 @@
# index [index] in that dimension. If pad<0, index counts from the left.
# If pad>0 index counts from the right index = 1 pads before index 1.
# index = 2 pads starting before index 2 and after index 1 in dimension [dim]
+ # When nInputDim is provided, inputs larger than that value will be considered batches
+ # where the actual dim to be padded will be dimension dim + 1.
- def __init__(self, dim, pad, value=0, index=0):
+ def __init__(self, dim, pad, value=0, index=0, nInputDim=0):
self.value = value
self.index = index
self.dim = dim
self.pad = pad
+ self.nInputDim = nInputDim
self.outputSize = torch.Size()
super(Padding, self).__init__()
@@ -22,6 +25,9 @@
self.outputSize = torch.Size(outputSize)
dim = self.dim
+ if hasattr(self, "nInputDim") and self.nInputDim > 0 and input.dim() != self.nInputDim:
+ dim = dim + 1
+
self.output.resize_(self.outputSize)
self.output.fill_(self.value)
index = self.index
@@ -46,6 +52,9 @@
self.gradInput.resize_as_(input)
dim = self.dim
+ if hasattr(self, "nInputDim") and self.nInputDim > 0 and input.dim() != self.nInputDim:
+ dim = dim + 1
+
index = self.index
pad = self.pad
if pad > 0:
| {"golden_diff": "diff --git a/torch/legacy/nn/Padding.py b/torch/legacy/nn/Padding.py\n--- a/torch/legacy/nn/Padding.py\n+++ b/torch/legacy/nn/Padding.py\n@@ -7,12 +7,15 @@\n # index [index] in that dimension. If pad<0, index counts from the left.\n # If pad>0 index counts from the right index = 1 pads before index 1.\n # index = 2 pads starting before index 2 and after index 1 in dimension [dim]\n+ # When nInputDim is provided, inputs larger than that value will be considered batches\n+ # where the actual dim to be padded will be dimension dim + 1.\n \n- def __init__(self, dim, pad, value=0, index=0):\n+ def __init__(self, dim, pad, value=0, index=0, nInputDim=0):\n self.value = value\n self.index = index\n self.dim = dim\n self.pad = pad\n+ self.nInputDim = nInputDim\n self.outputSize = torch.Size()\n super(Padding, self).__init__()\n \n@@ -22,6 +25,9 @@\n self.outputSize = torch.Size(outputSize)\n dim = self.dim\n \n+ if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n+ dim = dim + 1\n+\n self.output.resize_(self.outputSize)\n self.output.fill_(self.value)\n index = self.index\n@@ -46,6 +52,9 @@\n self.gradInput.resize_as_(input)\n dim = self.dim\n \n+ if hasattr(self, \"nInputDim\") and self.nInputDim > 0 and input.dim() != self.nInputDim:\n+ dim = dim + 1\n+\n index = self.index\n pad = self.pad\n if pad > 0:\n", "issue": "Bug in legacy padding layer\nI've found a bug in [Padding.py](https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/Padding.py).\r\nIt doesn't handle nInputDim original Torch Padding layer's parameter . As far as I've unterstood, you've rewritten lua code from [here](https://github.com/torch/nn/blob/master/Padding.lua), but not fully. 19th line in this file contains code for dealing with nInputDim parameter.\r\nTo reproduce issue, you need to download ENet model from [here](https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa)(original model from ENet authors) and make inference:\r\n`import numpy as np`\r\n`import torch`\r\n`from torch.utils.serialization import load_lua`\r\n`image = np.ones((1,3,1024,2048))`\r\n`tensor = torch.FloatTensor(image)`\r\n`net_torch = load_lua(torch_model)`\r\n`out_torch = net_torch.forward(tensor)`\r\n\r\nNow I've got en exception: \r\nTraceback (most recent call last):\r\n```\r\n File \"/hdd/PycharmProjects/untitled/test.py\", line 110, in <module>\r\n out_torch = net_torch.forward(tensor).numpy()\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Module.py\", line 33, in forward\r\n return self.updateOutput(input)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py\", line 36, in updateOutput\r\n currentOutput = module.updateOutput(currentOutput)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/Sequential.py\", line 36, in updateOutput\r\n currentOutput = module.updateOutput(currentOutput)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/legacy/nn/CAddTable.py\", line 20, in updateOutput\r\n self.output.add_(input[i])\r\nRuntimeError: inconsistent tensor size at /b/wheel/pytorch-src/torch/lib/TH/generic/THTensorMath.c:827\r\n```\r\n\r\nPadding is added to wrong axis, what's why CAddTable can't sum blobs with different shapes and throws exception.\r\nIf I edit the code of Padding.py by adding one to self.dim variables (lines 21 and 23 in Padding.py), all works correctly.\n", "before_files": [{"content": "import torch\nfrom .Module import Module\n\n\nclass Padding(Module):\n # pad puts in [pad] amount of 
[value] over dimension [dim], starting at\n # index [index] in that dimension. If pad<0, index counts from the left.\n # If pad>0 index counts from the right index = 1 pads before index 1.\n # index = 2 pads starting before index 2 and after index 1 in dimension [dim]\n\n def __init__(self, dim, pad, value=0, index=0):\n self.value = value\n self.index = index\n self.dim = dim\n self.pad = pad\n self.outputSize = torch.Size()\n super(Padding, self).__init__()\n\n def updateOutput(self, input):\n outputSize = list(input.size())\n outputSize[self.dim] += abs(self.pad)\n self.outputSize = torch.Size(outputSize)\n dim = self.dim\n\n self.output.resize_(self.outputSize)\n self.output.fill_(self.value)\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.output.narrow(dim, pad, input.size(dim)).copy_(input)\n elif index == input.size(dim):\n self.output.narrow(dim, 0, input.size(dim)).copy_(input)\n else:\n self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))\n self.output.narrow(dim, index + pad, input.size(dim) -\n index).copy_(input.narrow(dim, index, input.size(dim) - index))\n\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n self.gradInput.resize_as_(input)\n dim = self.dim\n\n index = self.index\n pad = self.pad\n if pad > 0:\n index = input.size(dim) - index\n else:\n pad = -pad\n\n if index == 0:\n self.gradInput.copy_(gradOutput.narrow(dim, pad, input.size(dim)))\n elif index == input.size(dim):\n self.gradInput.copy_(gradOutput.narrow(dim, 0, input.size(dim)))\n else:\n self.gradInput.narrow(dim, 0, index).copy_(gradOutput.narrow(dim, 0, index))\n self.gradInput.narrow(dim, index, input.size(\n dim) - index).copy_(gradOutput.narrow(dim, index + pad, input.size(dim) - index))\n\n return self.gradInput\n", "path": "torch/legacy/nn/Padding.py"}]} | 1,782 | 438 |
gh_patches_debug_22994 | rasdani/github-patches | git_diff | numba__numba-2610 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lowering error when removing element from the middle of linked list
Hi
I am having a hard time trying to figure out an error when I reassign "element_0.next = element_1.next" of a linked list. I posted my error at stackoverflow: https://stackoverflow.com/questions/47232035/numba-lowering-error-when-reassigning-next-link-of-a-linked-list-element
I am creating this issue to bring it to your attention.
I have looked through most of your examples and documentation pages, but have no ideas whatsoever.
What am I missing?
I am using numba **0.35.0** (from Anaconda, python 3.6.2).
Thanks
</issue>
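For reference, the failing pattern described in the Stack Overflow post is a jitclass whose `next` attribute has an optional (deferred) type, with one node's `next` reassigned to another's. A hypothetical minimal reproducer along those lines (class and field names are invented, and whether this exact snippet reproduces the error on 0.35 is an assumption):

```python
# Hypothetical reproducer (all names invented); the numba 0.35-era jitclass
# API with a deferred, optional `next` field is assumed.
from numba import njit, jitclass, deferred_type, optional, int64

node_type = deferred_type()

@jitclass([('value', int64), ('next', optional(node_type))])
class Node(object):
    def __init__(self, value):
        self.value = value
        self.next = None

node_type.define(Node.class_type.instance_type)

@njit
def remove_middle(head):
    # head.next has an Optional type, so this assignment exercises the
    # Optional lowering paths defined in numba/targets/optional.py
    head.next = head.next.next

a, b, c = Node(0), Node(1), Node(2)
a.next = b
b.next = c
remove_middle(a)   # reported to fail with a LoweringError on numba 0.35
```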
<code>
[start of numba/targets/optional.py]
1 from __future__ import print_function, absolute_import, division
2
3 from numba import types, cgutils
4
5 from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,
6 impl_ret_untracked)
7
8
9 def always_return_true_impl(context, builder, sig, args):
10 return cgutils.true_bit
11
12
13 def always_return_false_impl(context, builder, sig, args):
14 return cgutils.false_bit
15
16
17 def optional_is_none(context, builder, sig, args):
18 """
19 Check if an Optional value is invalid
20 """
21 [lty, rty] = sig.args
22 [lval, rval] = args
23
24 # Make sure None is on the right
25 if lty == types.none:
26 lty, rty = rty, lty
27 lval, rval = rval, lval
28
29 opt_type = lty
30 opt_val = lval
31
32 opt = context.make_helper(builder, opt_type, opt_val)
33 res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
34 return impl_ret_untracked(context, builder, sig.return_type, res)
35
36
37 # None is/not None
38 lower_builtin('is', types.none, types.none)(always_return_true_impl)
39
40 # Optional is None
41 lower_builtin('is', types.Optional, types.none)(optional_is_none)
42 lower_builtin('is', types.none, types.Optional)(optional_is_none)
43
44
45 @lower_getattr_generic(types.Optional)
46 def optional_getattr(context, builder, typ, value, attr):
47 """
48 Optional.__getattr__ => redirect to the wrapped type.
49 """
50 inner_type = typ.type
51 val = context.cast(builder, value, typ, inner_type)
52 imp = context.get_getattr(inner_type, attr)
53 return imp(context, builder, inner_type, val, attr)
54
55
56 @lower_cast(types.Optional, types.Optional)
57 def optional_to_optional(context, builder, fromty, toty, val):
58 """
59 The handling of optional->optional cast must be special cased for
60 correct propagation of None value. Given type T and U. casting of
61 T? to U? (? denotes optional) should always succeed. If the from-value
62 is None, the None value the casted value (U?) should be None; otherwise,
63 the from-value is casted to U. This is different from casting T? to U,
64 which requires the from-value must not be None.
65 """
66 optval = context.make_helper(builder, fromty, value=val)
67 validbit = cgutils.as_bool_bit(builder, optval.valid)
68 # Create uninitialized optional value
69 outoptval = context.make_helper(builder, toty)
70
71 with builder.if_else(validbit) as (is_valid, is_not_valid):
72 with is_valid:
73 # Cast internal value
74 outoptval.valid = cgutils.true_bit
75 outoptval.data = context.cast(builder, optval.data,
76 fromty.type, toty.type)
77
78 with is_not_valid:
79 # Store None to result
80 outoptval.valid = cgutils.false_bit
81 outoptval.data = cgutils.get_null_value(
82 outoptval.data.type)
83
84 return outoptval._getvalue()
85
86
87 @lower_cast(types.Any, types.Optional)
88 def any_to_optional(context, builder, fromty, toty, val):
89 if fromty == types.none:
90 return context.make_optional_none(builder, toty.type)
91 else:
92 val = context.cast(builder, val, fromty, toty.type)
93 return context.make_optional_value(builder, toty.type, val)
94
95
96 @lower_cast(types.Optional, types.Any)
97 @lower_cast(types.Optional, types.Boolean)
98 def optional_to_any(context, builder, fromty, toty, val):
99 optval = context.make_helper(builder, fromty, value=val)
100 validbit = cgutils.as_bool_bit(builder, optval.valid)
101 with builder.if_then(builder.not_(validbit), likely=False):
102 msg = "expected %s, got None" % (fromty.type,)
103 context.call_conv.return_user_exc(builder, TypeError, (msg,))
104
105 return context.cast(builder, optval.data, fromty.type, toty)
106
[end of numba/targets/optional.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numba/targets/optional.py b/numba/targets/optional.py
--- a/numba/targets/optional.py
+++ b/numba/targets/optional.py
@@ -1,9 +1,9 @@
from __future__ import print_function, absolute_import, division
-from numba import types, cgutils
+from numba import types, cgutils, typing
from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,
- impl_ret_untracked)
+ impl_ret_untracked, lower_setattr_generic)
def always_return_true_impl(context, builder, sig, args):
@@ -53,6 +53,21 @@
return imp(context, builder, inner_type, val, attr)
+@lower_setattr_generic(types.Optional)
+def optional_setattr(context, builder, sig, args, attr):
+ """
+ Optional.__setattr__ => redirect to the wrapped type.
+ """
+ basety, valty = sig.args
+ target, val = args
+ target_type = basety.type
+ target = context.cast(builder, target, basety, target_type)
+
+ newsig = typing.signature(sig.return_type, target_type, valty)
+ imp = context.get_setattr(attr, newsig)
+ return imp(builder, (target, val))
+
+
@lower_cast(types.Optional, types.Optional)
def optional_to_optional(context, builder, fromty, toty, val):
"""
| {"golden_diff": "diff --git a/numba/targets/optional.py b/numba/targets/optional.py\n--- a/numba/targets/optional.py\n+++ b/numba/targets/optional.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, absolute_import, division\n \n-from numba import types, cgutils\n+from numba import types, cgutils, typing\n \n from .imputils import (lower_cast, lower_builtin, lower_getattr_generic,\n- impl_ret_untracked)\n+ impl_ret_untracked, lower_setattr_generic)\n \n \n def always_return_true_impl(context, builder, sig, args):\n@@ -53,6 +53,21 @@\n return imp(context, builder, inner_type, val, attr)\n \n \n+@lower_setattr_generic(types.Optional)\n+def optional_setattr(context, builder, sig, args, attr):\n+ \"\"\"\n+ Optional.__setattr__ => redirect to the wrapped type.\n+ \"\"\"\n+ basety, valty = sig.args\n+ target, val = args\n+ target_type = basety.type\n+ target = context.cast(builder, target, basety, target_type)\n+\n+ newsig = typing.signature(sig.return_type, target_type, valty)\n+ imp = context.get_setattr(attr, newsig)\n+ return imp(builder, (target, val))\n+\n+\n @lower_cast(types.Optional, types.Optional)\n def optional_to_optional(context, builder, fromty, toty, val):\n \"\"\"\n", "issue": "Lowering error when removing element from the middle of linked list\nHi\r\n\r\nI am having a hard time trying to figure out an error when I reassign \"element_0.next = element_1.next\" of a linked list. I posted my error at stackoverflow: https://stackoverflow.com/questions/47232035/numba-lowering-error-when-reassigning-next-link-of-a-linked-list-element\r\n\r\nI am creating this issue to make it closer to you.\r\n\r\nI have visited most of your examples and documentation pages, no ideas whatsoever.\r\n\r\nWhat am I missing?\r\n\r\nI am using numba **0.35.0** (from Anaconda, python 3.6.2).\r\n\r\nThanks\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom numba import types, cgutils\n\nfrom .imputils import (lower_cast, lower_builtin, lower_getattr_generic,\n impl_ret_untracked)\n\n\ndef always_return_true_impl(context, builder, sig, args):\n return cgutils.true_bit\n\n\ndef always_return_false_impl(context, builder, sig, args):\n return cgutils.false_bit\n\n\ndef optional_is_none(context, builder, sig, args):\n \"\"\"\n Check if an Optional value is invalid\n \"\"\"\n [lty, rty] = sig.args\n [lval, rval] = args\n\n # Make sure None is on the right\n if lty == types.none:\n lty, rty = rty, lty\n lval, rval = rval, lval\n\n opt_type = lty\n opt_val = lval\n\n opt = context.make_helper(builder, opt_type, opt_val)\n res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n\n# None is/not None\nlower_builtin('is', types.none, types.none)(always_return_true_impl)\n\n# Optional is None\nlower_builtin('is', types.Optional, types.none)(optional_is_none)\nlower_builtin('is', types.none, types.Optional)(optional_is_none)\n\n\n@lower_getattr_generic(types.Optional)\ndef optional_getattr(context, builder, typ, value, attr):\n \"\"\"\n Optional.__getattr__ => redirect to the wrapped type.\n \"\"\"\n inner_type = typ.type\n val = context.cast(builder, value, typ, inner_type)\n imp = context.get_getattr(inner_type, attr)\n return imp(context, builder, inner_type, val, attr)\n\n\n@lower_cast(types.Optional, types.Optional)\ndef optional_to_optional(context, builder, fromty, toty, val):\n \"\"\"\n The handling of optional->optional cast must be special cased for\n correct propagation 
of None value. Given type T and U. casting of\n T? to U? (? denotes optional) should always succeed. If the from-value\n is None, the None value the casted value (U?) should be None; otherwise,\n the from-value is casted to U. This is different from casting T? to U,\n which requires the from-value must not be None.\n \"\"\"\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n # Create uninitialized optional value\n outoptval = context.make_helper(builder, toty)\n\n with builder.if_else(validbit) as (is_valid, is_not_valid):\n with is_valid:\n # Cast internal value\n outoptval.valid = cgutils.true_bit\n outoptval.data = context.cast(builder, optval.data,\n fromty.type, toty.type)\n\n with is_not_valid:\n # Store None to result\n outoptval.valid = cgutils.false_bit\n outoptval.data = cgutils.get_null_value(\n outoptval.data.type)\n\n return outoptval._getvalue()\n\n\n@lower_cast(types.Any, types.Optional)\ndef any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n return context.make_optional_none(builder, toty.type)\n else:\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n\n\n@lower_cast(types.Optional, types.Any)\n@lower_cast(types.Optional, types.Boolean)\ndef optional_to_any(context, builder, fromty, toty, val):\n optval = context.make_helper(builder, fromty, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n with builder.if_then(builder.not_(validbit), likely=False):\n msg = \"expected %s, got None\" % (fromty.type,)\n context.call_conv.return_user_exc(builder, TypeError, (msg,))\n\n return context.cast(builder, optval.data, fromty.type, toty)\n", "path": "numba/targets/optional.py"}]} | 1,803 | 326 |
gh_patches_debug_8179 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1659 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fatal error when $TERM is not set
I am trying to use `pwntools` in a SageMath script. This works great interactively, but when I run my script with `sage foo.sage`, it fails somewhere in the terminal-handling code. I have traced this back to Sage unsetting $TERM in non-interactive calls due to https://trac.sagemath.org/ticket/12263. Thus, the issue can easily be reproduced without SageMath:
```sh
~$ docker run -it pwntools/pwntools:stable
pwntools@bce19e99e965:~$ TERM= python -c 'import pwn'
Warning: error: setupterm: could not find terminfo database
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pwn/__init__.py", line 4, in <module>
from pwn.toplevel import *
File "/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py", line 20, in <module>
import pwnlib
File "/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py", line 43, in <module>
importlib.import_module('.%s' % module, 'pwnlib')
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/args.py", line 62, in <module>
from pwnlib import term
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py", line 6, in <module>
from pwnlib.term import completer
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py", line 7, in <module>
from pwnlib.term import readline
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py", line 13, in <module>
from pwnlib.term import text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 126, in <module>
sys.modules[__name__] = Module()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 49, in __init__
s = termcap.get(y)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/unix_termcap.py", line 28, in get
s = curses.tigetstr(cap)
_curses.error: must call (at least) setupterm() first
```
</issue>
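Until the library guards against this itself, a workaround consistent with the environment-variable check visible in `unix_termcap.py` below is to disable pwntools' terminal handling before importing it (sketch):

```python
import os

# Tell pwnlib to skip its curses/terminfo setup when $TERM is unusable.
os.environ.setdefault('PWNLIB_NOTERM', '1')

import pwn  # noqa: E402 - imported after the environment tweak on purpose
```

Exporting a valid `TERM` value (for example `xterm`) before launching Python also avoids the failing `curses.setupterm()` call.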
<code>
[start of pwnlib/term/unix_termcap.py]
1 from __future__ import division
2 from __future__ import print_function
3
4 __all__ = ['get']
5 import curses
6 import os
7 import sys
8
9 cache = None
10
11 def get(cap, *args, **kwargs):
12 default = kwargs.pop('default', '')
13
14 if 'PWNLIB_NOTERM' in os.environ:
15 return ''
16
17 # Hack for readthedocs.org
18 if 'READTHEDOCS' in os.environ:
19 return ''
20
21 if kwargs != {}:
22 raise TypeError("get(): No such argument %r" % kwargs.popitem()[0])
23
24 if cache is None:
25 init()
26 s = cache.get(cap)
27 if not s:
28 s = curses.tigetstr(cap)
29 if s is None:
30 s = curses.tigetnum(cap)
31 if s == -2:
32 s = curses.tigetflag(cap)
33 if s == -1:
34 # default to empty string so tparm doesn't fail
35 s = ''
36 else:
37 s = bool(s)
38 cache[cap] = s
39 # if `s' is not set `curses.tparm' will throw an error if given arguments
40 if args and s:
41 return curses.tparm(s, *args)
42 else:
43 return s
44
45 def init():
46 global cache
47
48 # Detect running under Jupyter
49 try:
50 if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
51 os.environ['PWNLIB_NOTERM'] = '1'
52 os.environ['JUPYTER_DETECTED'] ='yes'
53 except NameError:
54 pass
55
56 if 'PWNLIB_NOTERM' not in os.environ:
57 # Fix for BPython
58 try:
59 curses.setupterm()
60 except curses.error as e:
61 import traceback
62 print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)
63
64 cache = {}
65 # Manually add reset sequence into the cache.
66 # Can't look it up using tigetstr.
67 cache['reset'] = '\x1b[m'
68
[end of pwnlib/term/unix_termcap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/term/unix_termcap.py b/pwnlib/term/unix_termcap.py
--- a/pwnlib/term/unix_termcap.py
+++ b/pwnlib/term/unix_termcap.py
@@ -60,6 +60,8 @@
except curses.error as e:
import traceback
print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)
+ print('Terminal features will not be available. Consider setting TERM variable to your current terminal name (or xterm).', file=sys.stderr)
+ os.environ['PWNLIB_NOTERM'] = '1'
cache = {}
# Manually add reset sequence into the cache.
| {"golden_diff": "diff --git a/pwnlib/term/unix_termcap.py b/pwnlib/term/unix_termcap.py\n--- a/pwnlib/term/unix_termcap.py\n+++ b/pwnlib/term/unix_termcap.py\n@@ -60,6 +60,8 @@\n except curses.error as e:\n import traceback\n print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)\n+ print('Terminal features will not be available. Consider setting TERM variable to your current terminal name (or xterm).', file=sys.stderr)\n+ os.environ['PWNLIB_NOTERM'] = '1'\n \n cache = {}\n # Manually add reset sequence into the cache.\n", "issue": "Fatal error when $TERM is not set\nI am trying to use `pwntools` in a SageMath script. This works great interactively, but when I run my script with `sage foo.sage`, it fails somewhere in terminal code. I have traced this back to sage unsetting $TERM in non-interactive calls due to https://trac.sagemath.org/ticket/12263. Thus, issue can be easily reproduced without SageMath:\r\n\r\n```sh\r\n~$ docker run -it pwntools/pwntools:stable\r\npwntools@bce19e99e965:~$ TERM= python -c 'import pwn'\r\nWarning: error: setupterm: could not find terminfo database\r\n\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python2.7/dist-packages/pwn/__init__.py\", line 4, in <module>\r\n from pwn.toplevel import *\r\n File \"/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py\", line 20, in <module>\r\n import pwnlib\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py\", line 43, in <module>\r\n importlib.import_module('.%s' % module, 'pwnlib')\r\n File \"/usr/lib/python2.7/importlib/__init__.py\", line 37, in import_module\r\n __import__(name)\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/args.py\", line 62, in <module>\r\n from pwnlib import term\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py\", line 6, in <module>\r\n from pwnlib.term import completer\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py\", line 7, in <module>\r\n from pwnlib.term import readline\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py\", line 13, in <module>\r\n from pwnlib.term import text\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 126, in <module>\r\n sys.modules[__name__] = Module()\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 49, in __init__\r\n s = termcap.get(y)\r\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/unix_termcap.py\", line 28, in get\r\n s = curses.tigetstr(cap)\r\n_curses.error: must call (at least) setupterm() first\r\n```\n", "before_files": [{"content": "from __future__ import division\nfrom __future__ import print_function\n\n__all__ = ['get']\nimport curses\nimport os\nimport sys\n\ncache = None\n\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n # Hack for readthedocs.org\n if 'READTHEDOCS' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache is None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s is None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n 
return s\n\ndef init():\n global cache\n\n # Detect running under Jupyter\n try:\n if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':\n os.environ['PWNLIB_NOTERM'] = '1'\n os.environ['JUPYTER_DETECTED'] ='yes'\n except NameError:\n pass\n\n if 'PWNLIB_NOTERM' not in os.environ:\n # Fix for BPython\n try:\n curses.setupterm()\n except curses.error as e:\n import traceback\n print('Warning:', ''.join(traceback.format_exception_only(e.__class__, e)), file=sys.stderr)\n\n cache = {}\n # Manually add reset sequence into the cache.\n # Can't look it up using tigetstr.\n cache['reset'] = '\\x1b[m'\n", "path": "pwnlib/term/unix_termcap.py"}]} | 1,745 | 160 |
gh_patches_debug_30175 | rasdani/github-patches | git_diff | microsoft__ptvsd-480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyPI package missing information
Looking at https://pypi.org/project/ptvsd/4.0.0a1/ we are currently missing:
* link to GitHub
* long description
* specific classifiers
* any mention of VS Code
* link to doc pages
Would be nice to get these added.
</issue>
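For reference, a rough sketch of the kind of `setup()` metadata that would fill these gaps; the exact values below are illustrative assumptions, not the project's final choices:

```python
from setuptools import setup

with open('DESCRIPTION.md', 'r') as fh:
    long_description = fh.read()

setup(
    name='ptvsd',
    description='Remote debugging server for Python support in Visual Studio and Visual Studio Code',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://aka.ms/ptvs',
    project_urls={'Source Code': 'https://github.com/Microsoft/ptvsd'},
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Debuggers',
    ],
)
```

Running `twine check dist/*` on the built distributions is a quick way to confirm the long description will render on PyPI.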
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See LICENSE in the project root
5 # for license information.
6
7 import os
8 import os.path
9 import subprocess
10 import sys
11
12 from setuptools import setup
13
14 import versioneer
15 import ptvsd
16 import ptvsd._vendored
17
18
19 PYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')
20 PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
21
22
23 def cython_build():
24 print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
25 subprocess.call([
26 sys.executable,
27 os.path.join(PYDEVD_ROOT, 'setup_cython.py'),
28 'build_ext',
29 '-i',
30 ])
31
32
33 def iter_vendored_files():
34 # Add pydevd files as data files for this package. They are not
35 # treated as a package of their own, because we don't actually
36 # want to provide pydevd - just use our own copy internally.
37 for project in ptvsd._vendored.list_all():
38 for filename in ptvsd._vendored.iter_packaging_files(project):
39 yield filename
40
41
42 if __name__ == '__main__':
43 if not os.getenv('SKIP_CYTHON_BUILD'):
44 cython_build()
45
46 setup(
47 name='ptvsd',
48 version=versioneer.get_version(),
49 description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
50 #long_description=open('DESCRIPTION.md').read(),
51 #long_description_content_type='text/markdown',
52 license='MIT',
53 author='Microsoft Corporation',
54 author_email='[email protected]',
55 url='https://aka.ms/ptvs',
56 classifiers=[
57 'Development Status :: 3 - Alpha',
58 'Programming Language :: Python',
59 'Programming Language :: Python :: 2',
60 'Programming Language :: Python :: 3',
61 'License :: OSI Approved :: MIT License',
62 ],
63 packages=[
64 'ptvsd',
65 'ptvsd._vendored',
66 ],
67 package_data={
68 'ptvsd': ['ThirdPartyNotices.txt'],
69 'ptvsd._vendored': list(iter_vendored_files()),
70 },
71 cmdclass=versioneer.get_cmdclass(),
72 )
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,10 @@
yield filename
+with open('DESCRIPTION.md', 'r') as fh:
+ long_description = fh.read()
+
+
if __name__ == '__main__':
if not os.getenv('SKIP_CYTHON_BUILD'):
cython_build()
@@ -47,17 +51,22 @@
name='ptvsd',
version=versioneer.get_version(),
description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
- #long_description=open('DESCRIPTION.md').read(),
- #long_description_content_type='text/markdown',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://aka.ms/ptvs',
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
classifiers=[
'Development Status :: 3 - Alpha',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Topic :: Software Development :: Debuggers',
+ 'Operating System :: OS Independent',
+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',
'License :: OSI Approved :: MIT License',
],
packages=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,6 +39,10 @@\n yield filename\n \n \n+with open('DESCRIPTION.md', 'r') as fh:\n+ long_description = fh.read()\n+\n+\n if __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n@@ -47,17 +51,22 @@\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n- #long_description=open('DESCRIPTION.md').read(),\n- #long_description_content_type='text/markdown',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 3',\n+ 'Programming Language :: Python :: 2.7',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n+ 'Topic :: Software Development :: Debuggers',\n+ 'Operating System :: OS Independent',\n+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n", "issue": "PyPI package missing information\nLooking at https://pypi.org/project/ptvsd/4.0.0a1/ we are currently missing:\r\n* link to GitHub\r\n* long description\r\n* specific classifiers\r\n* any mention of VS Code\r\n* link to doc pages\r\n\r\nWould be nice to get these added.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. 
They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n #long_description=open('DESCRIPTION.md').read(),\n #long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}]} | 1,246 | 401 |
gh_patches_debug_18596 | rasdani/github-patches | git_diff | mkdocs__mkdocs-647 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mailto links not correctly interpreted
Hi,
I've noticed "mailto" links are not correctly interpreted.
- This code:
```
Contact us at <foo@bar>
```
- Renders the following link:
http://localhost//mailto:foo@bar
---
- This code:
```
You can [contact us](foo@bar)
```
- Renders the following link:
http://localhost/page/permalink/foo@bar
</issue>
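A quick way to see why these hrefs slip past the existing scheme check, assuming Python-Markdown's usual autolink behaviour:

```python
from urllib.parse import urlparse
from markdown.util import AMP_SUBSTITUTE  # placeholder Markdown uses internally for '&'

# A literal mailto URL carries a scheme, so a scheme check leaves it alone:
urlparse('mailto:foo@bar').scheme   # -> 'mailto'

# An autolinked <foo@bar>, however, is entity-obfuscated by Markdown using
# AMP_SUBSTITUTE, so the resulting href has no scheme and falls through to the
# relative-path rewriting implemented below.
```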
<code>
[start of mkdocs/relative_path_ext.py]
1 """
2 # Relative Path Markdown Extension
3
4 During the MkDocs build we rewrite URLs that link to local
5 Markdown or media files. Using the following pages configuration
6 we can look at how the output is changed.
7
8 pages:
9 - ['index.md']
10 - ['tutorial/install.md']
11 - ['tutorial/intro.md']
12
13 ## Markdown URLs
14
15 When linking from `install.md` to `intro.md` the link would
16 simply be `[intro](intro.md)`. However, when we build
17 `install.md` we place it in a directory to create nicer URLs.
18 This means that the path to `intro.md` becomes `../intro/`
19
20 ## Media URLs
21
22 To make it easier to work with media files and store them all
23 under one directory we re-write those to all be based on the
24 root. So, with the following markdown to add an image.
25
26 
27
28 The output would depend on the location of the Markdown file it
29 was added too.
30
31 Source file | Generated Path | Image Path |
32 ------------------- | ----------------- | ---------------------------- |
33 index.md | / | ./img/initial-layout.png |
34 tutorial/install.md | tutorial/install/ | ../img/initial-layout.png |
35 tutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |
36
37 """
38
39 from __future__ import unicode_literals
40
41 import logging
42 import os
43
44 from markdown.extensions import Extension
45 from markdown.treeprocessors import Treeprocessor
46
47 from mkdocs import utils
48 from mkdocs.exceptions import MarkdownNotFound
49
50 log = logging.getLogger(__name__)
51
52
53 def _iter(node):
54 # TODO: Remove when dropping Python 2.6. Replace this
55 # function call with note.iter()
56 return [node] + node.findall('.//*')
57
58
59 def path_to_url(url, nav, strict):
60
61 scheme, netloc, path, params, query, fragment = (
62 utils.urlparse(url))
63
64 if scheme or netloc or not path:
65 # Ignore URLs unless they are a relative link to a markdown file.
66 return url
67
68 if nav and not utils.is_markdown_file(path):
69 path = utils.create_relative_media_url(nav, path)
70 elif nav:
71 # If the site navigation has been provided, then validate
72 # the internal hyperlink, making sure the target actually exists.
73 target_file = nav.file_context.make_absolute(path)
74
75 if target_file.startswith(os.path.sep):
76 target_file = target_file[1:]
77
78 if target_file not in nav.source_files:
79 source_file = nav.file_context.current_file
80 msg = (
81 'The page "%s" contained a hyperlink to "%s" which '
82 'is not listed in the "pages" configuration.'
83 ) % (source_file, target_file)
84
85 # In strict mode raise an error at this point.
86 if strict:
87 raise MarkdownNotFound(msg)
88 # Otherwise, when strict mode isn't enabled, log a warning
89 # to the user and leave the URL as it is.
90 log.warning(msg)
91 return url
92 path = utils.get_url_path(target_file, nav.use_directory_urls)
93 path = nav.url_context.make_relative(path)
94 else:
95 path = utils.get_url_path(path).lstrip('/')
96
97 # Convert the .md hyperlink to a relative hyperlink to the HTML page.
98 fragments = (scheme, netloc, path, params, query, fragment)
99 url = utils.urlunparse(fragments)
100 return url
101
102
103 class RelativePathTreeprocessor(Treeprocessor):
104
105 def __init__(self, site_navigation, strict):
106 self.site_navigation = site_navigation
107 self.strict = strict
108
109 def run(self, root):
110 """Update urls on anchors and images to make them relative
111
112 Iterates through the full document tree looking for specific
113 tags and then makes them relative based on the site navigation
114 """
115
116 for element in _iter(root):
117
118 if element.tag == 'a':
119 key = 'href'
120 elif element.tag == 'img':
121 key = 'src'
122 else:
123 continue
124
125 url = element.get(key)
126 new_url = path_to_url(url, self.site_navigation, self.strict)
127 element.set(key, new_url)
128
129 return root
130
131
132 class RelativePathExtension(Extension):
133 """
134 The Extension class is what we pass to markdown, it then
135 registers the Treeprocessor.
136 """
137
138 def __init__(self, site_navigation, strict):
139 self.site_navigation = site_navigation
140 self.strict = strict
141
142 def extendMarkdown(self, md, md_globals):
143 relpath = RelativePathTreeprocessor(self.site_navigation, self.strict)
144 md.treeprocessors.add("relpath", relpath, "_end")
145
[end of mkdocs/relative_path_ext.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/relative_path_ext.py b/mkdocs/relative_path_ext.py
--- a/mkdocs/relative_path_ext.py
+++ b/mkdocs/relative_path_ext.py
@@ -43,6 +43,7 @@
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
+from markdown.util import AMP_SUBSTITUTE
from mkdocs import utils
from mkdocs.exceptions import MarkdownNotFound
@@ -61,8 +62,10 @@
scheme, netloc, path, params, query, fragment = (
utils.urlparse(url))
- if scheme or netloc or not path:
+ if scheme or netloc or not path or AMP_SUBSTITUTE in url:
# Ignore URLs unless they are a relative link to a markdown file.
+ # AMP_SUBSTITUTE is used internally by Markdown only for email,which is
+ # not a relative link. As urlparse errors on them, skip explicitly
return url
if nav and not utils.is_markdown_file(path):
| {"golden_diff": "diff --git a/mkdocs/relative_path_ext.py b/mkdocs/relative_path_ext.py\n--- a/mkdocs/relative_path_ext.py\n+++ b/mkdocs/relative_path_ext.py\n@@ -43,6 +43,7 @@\n \n from markdown.extensions import Extension\n from markdown.treeprocessors import Treeprocessor\n+from markdown.util import AMP_SUBSTITUTE\n \n from mkdocs import utils\n from mkdocs.exceptions import MarkdownNotFound\n@@ -61,8 +62,10 @@\n scheme, netloc, path, params, query, fragment = (\n utils.urlparse(url))\n \n- if scheme or netloc or not path:\n+ if scheme or netloc or not path or AMP_SUBSTITUTE in url:\n # Ignore URLs unless they are a relative link to a markdown file.\n+ # AMP_SUBSTITUTE is used internally by Markdown only for email,which is\n+ # not a relative link. As urlparse errors on them, skip explicitly\n return url\n \n if nav and not utils.is_markdown_file(path):\n", "issue": "Mailto links not correctly interpreted\nHi, \nI've noticed \"mailto\" links are not correctly interpreted.\n- This code:\n\n```\nContact us at <foo@bar>\n```\n- Renders the following link: \n http://localhost//mailto:foo@bar \n\n---\n- This code:\n\n```\nYou can [contact us](foo@bar)\n```\n- Renders the following link: \n http://localhost/page/permalink/foo@bar\n\n", "before_files": [{"content": "\"\"\"\n# Relative Path Markdown Extension\n\nDuring the MkDocs build we rewrite URLs that link to local\nMarkdown or media files. Using the following pages configuration\nwe can look at how the output is changed.\n\n pages:\n - ['index.md']\n - ['tutorial/install.md']\n - ['tutorial/intro.md']\n\n## Markdown URLs\n\nWhen linking from `install.md` to `intro.md` the link would\nsimply be `[intro](intro.md)`. However, when we build\n`install.md` we place it in a directory to create nicer URLs.\nThis means that the path to `intro.md` becomes `../intro/`\n\n## Media URLs\n\nTo make it easier to work with media files and store them all\nunder one directory we re-write those to all be based on the\nroot. So, with the following markdown to add an image.\n\n \n\nThe output would depend on the location of the Markdown file it\nwas added too.\n\nSource file | Generated Path | Image Path |\n------------------- | ----------------- | ---------------------------- |\nindex.md | / | ./img/initial-layout.png |\ntutorial/install.md | tutorial/install/ | ../img/initial-layout.png |\ntutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\n\nfrom mkdocs import utils\nfrom mkdocs.exceptions import MarkdownNotFound\n\nlog = logging.getLogger(__name__)\n\n\ndef _iter(node):\n # TODO: Remove when dropping Python 2.6. 
Replace this\n # function call with note.iter()\n return [node] + node.findall('.//*')\n\n\ndef path_to_url(url, nav, strict):\n\n scheme, netloc, path, params, query, fragment = (\n utils.urlparse(url))\n\n if scheme or netloc or not path:\n # Ignore URLs unless they are a relative link to a markdown file.\n return url\n\n if nav and not utils.is_markdown_file(path):\n path = utils.create_relative_media_url(nav, path)\n elif nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = nav.file_context.make_absolute(path)\n\n if target_file.startswith(os.path.sep):\n target_file = target_file[1:]\n\n if target_file not in nav.source_files:\n source_file = nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n ) % (source_file, target_file)\n\n # In strict mode raise an error at this point.\n if strict:\n raise MarkdownNotFound(msg)\n # Otherwise, when strict mode isn't enabled, log a warning\n # to the user and leave the URL as it is.\n log.warning(msg)\n return url\n path = utils.get_url_path(target_file, nav.use_directory_urls)\n path = nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n fragments = (scheme, netloc, path, params, query, fragment)\n url = utils.urlunparse(fragments)\n return url\n\n\nclass RelativePathTreeprocessor(Treeprocessor):\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def run(self, root):\n \"\"\"Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n\n for element in _iter(root):\n\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = path_to_url(url, self.site_navigation, self.strict)\n element.set(key, new_url)\n\n return root\n\n\nclass RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, site_navigation, strict):\n self.site_navigation = site_navigation\n self.strict = strict\n\n def extendMarkdown(self, md, md_globals):\n relpath = RelativePathTreeprocessor(self.site_navigation, self.strict)\n md.treeprocessors.add(\"relpath\", relpath, \"_end\")\n", "path": "mkdocs/relative_path_ext.py"}]} | 1,991 | 229 |
gh_patches_debug_30640 | rasdani/github-patches | git_diff | sanic-org__sanic-326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
During handling of the above exception, another exception occurred
In examples/exception_monitoring.py, when I try to run the app and hit the default route, it prints out the exception; then, when it calls "return super.default(self, request, exception)", it fails with the following exception:
AttributeError: type object 'super' has no attribute 'default'
It looks like this line in exceptions.py is the culprit (I could be wrong):
`handler = self.handlers.get(type(exception), self.default)`
since `__init__` does not have that attribute defined. I am running Python 3.6.
</issue>
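For context, `super` written bare is the built-in type itself, not a bound proxy, so `.default` cannot be looked up on it; a corrected sketch of the handler's return line:

```python
from sanic.exceptions import Handler

class CustomHandler(Handler):
    def default(self, request, exception):
        # ... logging / external reporting ...
        # Python 3 form:
        return super().default(request, exception)
        # equivalent explicit form:
        # return super(CustomHandler, self).default(request, exception)
```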
<code>
[start of examples/exception_monitoring.py]
1 """
2 Example intercepting uncaught exceptions using Sanic's error handler framework.
3
4 This may be useful for developers wishing to use Sentry, Airbrake, etc.
5 or a custom system to log and monitor unexpected errors in production.
6
7 First we create our own class inheriting from Handler in sanic.exceptions,
8 and pass in an instance of it when we create our Sanic instance. Inside this
9 class' default handler, we can do anything including sending exceptions to
10 an external service.
11 """
12
13
14
15 """
16 Imports and code relevant for our CustomHandler class
17 (Ordinarily this would be in a separate file)
18 """
19 from sanic.response import text
20 from sanic.exceptions import Handler, SanicException
21
22 class CustomHandler(Handler):
23 def default(self, request, exception):
24 # Here, we have access to the exception object
25 # and can do anything with it (log, send to external service, etc)
26
27 # Some exceptions are trivial and built into Sanic (404s, etc)
28 if not issubclass(type(exception), SanicException):
29 print(exception)
30
31 # Then, we must finish handling the exception by returning
32 # our response to the client
33 # For this we can just call the super class' default handler
34 return super.default(self, request, exception)
35
36
37
38
39 """
40 This is an ordinary Sanic server, with the exception that we set the
41 server's error_handler to an instance of our CustomHandler
42 """
43
44 from sanic import Sanic
45 from sanic.response import json
46
47 app = Sanic(__name__)
48
49 handler = CustomHandler(sanic=app)
50 app.error_handler = handler
51
52 @app.route("/")
53 async def test(request):
54 # Here, something occurs which causes an unexpected exception
55 # This exception will flow to our custom handler.
56 x = 1 / 0
57 return json({"test": True})
58
59
60 app.run(host="0.0.0.0", port=8000, debug=True)
61
[end of examples/exception_monitoring.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/exception_monitoring.py b/examples/exception_monitoring.py
--- a/examples/exception_monitoring.py
+++ b/examples/exception_monitoring.py
@@ -9,17 +9,15 @@
class' default handler, we can do anything including sending exceptions to
an external service.
"""
-
-
-
+from sanic.exceptions import Handler, SanicException
"""
Imports and code relevant for our CustomHandler class
(Ordinarily this would be in a separate file)
"""
-from sanic.response import text
-from sanic.exceptions import Handler, SanicException
+
class CustomHandler(Handler):
+
def default(self, request, exception):
# Here, we have access to the exception object
# and can do anything with it (log, send to external service, etc)
@@ -31,9 +29,7 @@
# Then, we must finish handling the exception by returning
# our response to the client
# For this we can just call the super class' default handler
- return super.default(self, request, exception)
-
-
+ return super().default(request, exception)
"""
@@ -49,11 +45,12 @@
handler = CustomHandler(sanic=app)
app.error_handler = handler
+
@app.route("/")
async def test(request):
# Here, something occurs which causes an unexpected exception
# This exception will flow to our custom handler.
- x = 1 / 0
+ 1 / 0
return json({"test": True})
| {"golden_diff": "diff --git a/examples/exception_monitoring.py b/examples/exception_monitoring.py\n--- a/examples/exception_monitoring.py\n+++ b/examples/exception_monitoring.py\n@@ -9,17 +9,15 @@\n class' default handler, we can do anything including sending exceptions to\n an external service.\n \"\"\"\n-\n-\n-\n+from sanic.exceptions import Handler, SanicException\n \"\"\"\n Imports and code relevant for our CustomHandler class\n (Ordinarily this would be in a separate file)\n \"\"\"\n-from sanic.response import text\n-from sanic.exceptions import Handler, SanicException\n+\n \n class CustomHandler(Handler):\n+\n def default(self, request, exception):\n # Here, we have access to the exception object\n # and can do anything with it (log, send to external service, etc)\n@@ -31,9 +29,7 @@\n # Then, we must finish handling the exception by returning\n # our response to the client\n # For this we can just call the super class' default handler\n- return super.default(self, request, exception)\n-\n-\n+ return super().default(request, exception)\n \n \n \"\"\"\n@@ -49,11 +45,12 @@\n handler = CustomHandler(sanic=app)\n app.error_handler = handler\n \n+\n @app.route(\"/\")\n async def test(request):\n # Here, something occurs which causes an unexpected exception\n # This exception will flow to our custom handler.\n- x = 1 / 0\n+ 1 / 0\n return json({\"test\": True})\n", "issue": "During handling of the above exception, another exception occurred\nIn the examples/exception_monitoring.py, when i try to run the app and hit the default route, it prints out the exception and then it calls \"return super.default(self, request, exception)\", it returns with the following exception:\r\n\r\nAttributeError: type object 'super' has no attribute 'default'\r\n\r\nlooks like in the exceptions.py, this line is the culprit (i could be wrong):\r\n\r\n`handler = self.handlers.get(type(exception), self.default)\r\n`\r\n since __init__ does not have that attribute defined. I am running python 3.6\n", "before_files": [{"content": "\"\"\"\nExample intercepting uncaught exceptions using Sanic's error handler framework.\n\nThis may be useful for developers wishing to use Sentry, Airbrake, etc.\nor a custom system to log and monitor unexpected errors in production.\n\nFirst we create our own class inheriting from Handler in sanic.exceptions,\nand pass in an instance of it when we create our Sanic instance. 
Inside this\nclass' default handler, we can do anything including sending exceptions to\nan external service.\n\"\"\"\n\n\n\n\"\"\"\nImports and code relevant for our CustomHandler class\n(Ordinarily this would be in a separate file)\n\"\"\"\nfrom sanic.response import text\nfrom sanic.exceptions import Handler, SanicException\n\nclass CustomHandler(Handler):\n def default(self, request, exception):\n # Here, we have access to the exception object\n # and can do anything with it (log, send to external service, etc)\n\n # Some exceptions are trivial and built into Sanic (404s, etc)\n if not issubclass(type(exception), SanicException):\n print(exception)\n\n # Then, we must finish handling the exception by returning\n # our response to the client\n # For this we can just call the super class' default handler\n return super.default(self, request, exception)\n\n\n\n\n\"\"\"\nThis is an ordinary Sanic server, with the exception that we set the\nserver's error_handler to an instance of our CustomHandler\n\"\"\"\n\nfrom sanic import Sanic\nfrom sanic.response import json\n\napp = Sanic(__name__)\n\nhandler = CustomHandler(sanic=app)\napp.error_handler = handler\n\[email protected](\"/\")\nasync def test(request):\n # Here, something occurs which causes an unexpected exception\n # This exception will flow to our custom handler.\n x = 1 / 0\n return json({\"test\": True})\n\n\napp.run(host=\"0.0.0.0\", port=8000, debug=True)\n", "path": "examples/exception_monitoring.py"}]} | 1,196 | 334 |
gh_patches_debug_13210 | rasdani/github-patches | git_diff | fossasia__open-event-server-7862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calendar link in calendar not working properly
In the description, "Join using link: https://eventyay.com/e/fa96ae2c/video/Main Hall/187"
The space is treated as a break, clipping the link at "Main".
"Main Hall" should be URL-encoded (percent-encoded) as "Main%20Hall".
</issue>
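A quick illustration of the encoding involved, using only the standard library:

```python
from urllib.parse import quote

quote('Main Hall')   # -> 'Main%20Hall'
'https://eventyay.com/e/fa96ae2c/video/' + quote('Main Hall') + '/187'
# -> 'https://eventyay.com/e/fa96ae2c/video/Main%20Hall/187'
```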
<code>
[start of app/api/helpers/calendar/ical.py]
1 import pytz
2 from flask import jsonify
3 from flask_jwt_extended import current_user
4 from icalendar import Calendar, Event
5 from sqlalchemy import or_
6 from sqlalchemy.orm import joinedload
7
8 from app.models.session import Session
9
10
11 def to_ical(event, include_sessions=False, my_schedule=False, user_id=None):
12 cal = Calendar()
13 cal.add('version', '2.0')
14 cal.add('METHOD', 'PUBLISH')
15 cal.add('X-WR-CALNAME', event.name)
16 cal.add('X-WR-CALDESC', 'Event Calendar')
17
18 event_component = Event()
19 event_component.add('uid', event.identifier)
20 event_component.add('summary', event.name)
21 event_component.add('url', event.site_link)
22 event_component.add('dtstart', event.starts_at_tz)
23 event_component.add('dtend', event.ends_at_tz)
24 event_component.add('location', event.normalized_location)
25 event_component.add('description', event.description)
26 if event.has_coordinates:
27 event_component.add('geo', (event.latitude, event.longitude))
28 if event.owner_description:
29 event_component.add('organizer', event.owner_description)
30
31 cal.add_component(event_component)
32
33 if include_sessions:
34 sessions_query = (
35 Session.query.filter_by(event_id=event.id)
36 .options(joinedload(Session.microlocation))
37 .filter_by(deleted_at=None)
38 .filter(or_(Session.state == 'accepted', Session.state == 'confirmed'))
39 .order_by(Session.starts_at.asc())
40 )
41 if my_schedule:
42 if not (current_user or user_id):
43 return jsonify(error='Login or User ID required'), 401
44
45 user_id = user_id or current_user.id
46 sessions_query = sessions_query.join(Session.favourites).filter_by(
47 user_id=user_id
48 )
49 sessions = sessions_query.all()
50
51 for session in sessions:
52
53 if not (session and session.starts_at and session.ends_at):
54 continue
55
56 session_video_url = (
57 " "
58 + event.site_link
59 + '/video/'
60 + session.microlocation.video_stream.name
61 + "/"
62 + str(session.microlocation.video_stream.id)
63 if session.microlocation.video_stream
64 else ""
65 )
66 session_link_heading = (
67 "Join using link: " + session_video_url + "<br/>"
68 if session_video_url
69 else ""
70 )
71 session_description = (
72 " "
73 + "Room: "
74 + session.microlocation.name
75 + "<br/>"
76 + session_link_heading
77 + "<br/>"
78 + session.short_abstract
79 )
80 session_component = Event()
81 session_component.add('summary', session.title)
82 session_component.add('uid', str(session.id) + "-" + event.identifier)
83 session_component.add('geo', (event.latitude, event.longitude))
84 session_component.add(
85 'location',
86 session_video_url
87 or (session.microlocation and session.microlocation.name)
88 or '' + " " + event.location_name,
89 )
90 session_component.add(
91 'dtstart', session.starts_at.astimezone(pytz.timezone(event.timezone))
92 )
93 session_component.add(
94 'dtend', session.ends_at.astimezone(pytz.timezone(event.timezone))
95 )
96 session_component.add('description', session_description)
97 session_component.add('url', event.site_link + '/session/' + str(session.id))
98
99 cal.add_component(session_component)
100
101 return cal.to_ical()
102
[end of app/api/helpers/calendar/ical.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/helpers/calendar/ical.py b/app/api/helpers/calendar/ical.py
--- a/app/api/helpers/calendar/ical.py
+++ b/app/api/helpers/calendar/ical.py
@@ -1,3 +1,5 @@
+from urllib.parse import quote
+
import pytz
from flask import jsonify
from flask_jwt_extended import current_user
@@ -57,7 +59,7 @@
" "
+ event.site_link
+ '/video/'
- + session.microlocation.video_stream.name
+ + quote(session.microlocation.video_stream.name)
+ "/"
+ str(session.microlocation.video_stream.id)
if session.microlocation.video_stream
| {"golden_diff": "diff --git a/app/api/helpers/calendar/ical.py b/app/api/helpers/calendar/ical.py\n--- a/app/api/helpers/calendar/ical.py\n+++ b/app/api/helpers/calendar/ical.py\n@@ -1,3 +1,5 @@\n+from urllib.parse import quote\n+\n import pytz\n from flask import jsonify\n from flask_jwt_extended import current_user\n@@ -57,7 +59,7 @@\n \" \"\n + event.site_link\n + '/video/'\n- + session.microlocation.video_stream.name\n+ + quote(session.microlocation.video_stream.name)\n + \"/\"\n + str(session.microlocation.video_stream.id)\n if session.microlocation.video_stream\n", "issue": "Calendar link in calendar not working properly\nIn the description, \"Join using link: https://eventyay.com/e/fa96ae2c/video/Main Hall/187\"\r\n\r\nThe space is treated as a break, clipping the link at Main\"\r\n\r\n\"Main Hall\" should be HTTP encoded to Main%20Hall\n", "before_files": [{"content": "import pytz\nfrom flask import jsonify\nfrom flask_jwt_extended import current_user\nfrom icalendar import Calendar, Event\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import joinedload\n\nfrom app.models.session import Session\n\n\ndef to_ical(event, include_sessions=False, my_schedule=False, user_id=None):\n cal = Calendar()\n cal.add('version', '2.0')\n cal.add('METHOD', 'PUBLISH')\n cal.add('X-WR-CALNAME', event.name)\n cal.add('X-WR-CALDESC', 'Event Calendar')\n\n event_component = Event()\n event_component.add('uid', event.identifier)\n event_component.add('summary', event.name)\n event_component.add('url', event.site_link)\n event_component.add('dtstart', event.starts_at_tz)\n event_component.add('dtend', event.ends_at_tz)\n event_component.add('location', event.normalized_location)\n event_component.add('description', event.description)\n if event.has_coordinates:\n event_component.add('geo', (event.latitude, event.longitude))\n if event.owner_description:\n event_component.add('organizer', event.owner_description)\n\n cal.add_component(event_component)\n\n if include_sessions:\n sessions_query = (\n Session.query.filter_by(event_id=event.id)\n .options(joinedload(Session.microlocation))\n .filter_by(deleted_at=None)\n .filter(or_(Session.state == 'accepted', Session.state == 'confirmed'))\n .order_by(Session.starts_at.asc())\n )\n if my_schedule:\n if not (current_user or user_id):\n return jsonify(error='Login or User ID required'), 401\n\n user_id = user_id or current_user.id\n sessions_query = sessions_query.join(Session.favourites).filter_by(\n user_id=user_id\n )\n sessions = sessions_query.all()\n\n for session in sessions:\n\n if not (session and session.starts_at and session.ends_at):\n continue\n\n session_video_url = (\n \" \"\n + event.site_link\n + '/video/'\n + session.microlocation.video_stream.name\n + \"/\"\n + str(session.microlocation.video_stream.id)\n if session.microlocation.video_stream\n else \"\"\n )\n session_link_heading = (\n \"Join using link: \" + session_video_url + \"<br/>\"\n if session_video_url\n else \"\"\n )\n session_description = (\n \" \"\n + \"Room: \"\n + session.microlocation.name\n + \"<br/>\"\n + session_link_heading\n + \"<br/>\"\n + session.short_abstract\n )\n session_component = Event()\n session_component.add('summary', session.title)\n session_component.add('uid', str(session.id) + \"-\" + event.identifier)\n session_component.add('geo', (event.latitude, event.longitude))\n session_component.add(\n 'location',\n session_video_url\n or (session.microlocation and session.microlocation.name)\n or '' + \" \" + event.location_name,\n )\n 
session_component.add(\n 'dtstart', session.starts_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add(\n 'dtend', session.ends_at.astimezone(pytz.timezone(event.timezone))\n )\n session_component.add('description', session_description)\n session_component.add('url', event.site_link + '/session/' + str(session.id))\n\n cal.add_component(session_component)\n\n return cal.to_ical()\n", "path": "app/api/helpers/calendar/ical.py"}]} | 1,546 | 144 |
gh_patches_debug_33369 | rasdani/github-patches | git_diff | OCA__stock-logistics-warehouse-1192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[12.0] stock_secondary_unit "secondary qty" value in picking not affecting "initial demand"
AFFECTED VERSIONS
12.0 (it works ok on 13.0)
STEPS TO REPRODUCE
Activate Units of Measure in general settings > inventory
Inventory > Master Data > Products > Set a secondary unit in a product

Create a new transfer > add product > select secondary UoM > input secondary qty

CURRENT BEHAVIOR
Value "Initial demand" is not affected by secondary qty input
In the same way, if "Initial demand" is set, "secondary qty" does not change; there is effectively no relation between the two fields

REQUIRED BEHAVIOR
When "secondary qty" is updated, "initial demand" should also update, and vice versa, as already happens in sales orders with the secondary unit modules
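A rough sketch of the kind of onchange pair that could keep the two fields in sync (hypothetical illustration only; the field and factor names are assumed from the module's stock.secondary.unit.mixin, and the real fix may differ):

```python
# Sketch, not the actual module code: assumes the secondary_uom_id /
# secondary_uom_qty fields from stock.secondary.unit.mixin and a conversion
# factor of secondary_uom_id.factor * product_uom.factor.
from odoo import api, models
from odoo.tools.float_utils import float_round


class StockMove(models.Model):
    _inherit = "stock.move"

    @api.onchange("secondary_uom_id", "secondary_uom_qty")
    def _onchange_secondary_uom_qty(self):
        """Push the secondary quantity into the initial demand."""
        if self.secondary_uom_id:
            factor = self.secondary_uom_id.factor * self.product_uom.factor
            self.product_uom_qty = float_round(
                self.secondary_uom_qty * factor,
                precision_rounding=self.product_uom.rounding,
            )

    @api.onchange("product_uom_qty")
    def _onchange_product_uom_qty(self):
        """Push the initial demand back into the secondary quantity."""
        if self.secondary_uom_id:
            factor = self.secondary_uom_id.factor * self.product_uom.factor
            self.secondary_uom_qty = float_round(
                self.product_uom_qty / (factor or 1.0),
                precision_rounding=self.secondary_uom_id.uom_id.rounding,
            )
```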
VIDEO
https://recordit.co/zcuDUx6xco
</issue>
<code>
[start of stock_secondary_unit/models/stock_move.py]
1 # Copyright 2018 Tecnativa - Sergio Teruel
2 # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
3 from odoo import api, fields, models
4 from odoo.addons import decimal_precision as dp
5 from odoo.tools.float_utils import float_round
6
7
8 class StockSecondaryUnitMixin(models.AbstractModel):
9 _name = 'stock.secondary.unit.mixin'
10 _description = 'Stock Secondary Unit Mixin'
11
12 secondary_uom_id = fields.Many2one(
13 comodel_name='product.secondary.unit',
14 string='Second unit',
15 )
16 secondary_uom_qty = fields.Float(
17 string='Secondary Qty',
18 digits=dp.get_precision('Product Unit of Measure'),
19 )
20
21
22 class StockMove(models.Model):
23 _inherit = ['stock.move', 'stock.secondary.unit.mixin']
24 _name = 'stock.move'
25
26 def _merge_moves_fields(self):
27 res = super(StockMove, self)._merge_moves_fields()
28 res['secondary_uom_qty'] = self[-1:].secondary_uom_qty
29 return res
30
31
32 class StockMoveLine(models.Model):
33 _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']
34 _name = 'stock.move.line'
35
36 @api.model
37 def create(self, vals):
38 move = self.env['stock.move'].browse(vals.get('move_id', False))
39 if move.secondary_uom_id:
40 uom = self.env['uom.uom'].browse(vals['product_uom_id'])
41 factor = move.secondary_uom_id.factor * uom.factor
42 move_line_qty = vals.get(
43 'product_uom_qty', vals.get('qty_done', 0.0))
44 qty = float_round(
45 move_line_qty / (factor or 1.0),
46 precision_rounding=move.secondary_uom_id.uom_id.rounding
47 )
48 vals.update({
49 'secondary_uom_qty': qty,
50 'secondary_uom_id': move.secondary_uom_id.id,
51 })
52 return super().create(vals)
53
[end of stock_secondary_unit/models/stock_move.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py
--- a/stock_secondary_unit/models/stock_move.py
+++ b/stock_secondary_unit/models/stock_move.py
@@ -2,7 +2,7 @@
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.addons import decimal_precision as dp
-from odoo.tools.float_utils import float_round
+from odoo.tools.float_utils import float_compare, float_round
class StockSecondaryUnitMixin(models.AbstractModel):
@@ -28,6 +28,52 @@
res['secondary_uom_qty'] = self[-1:].secondary_uom_qty
return res
+ @api.onchange('secondary_uom_id', 'secondary_uom_qty')
+ def onchange_secondary_uom(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.secondary_uom_id.factor * self.product_uom.factor
+
+ qty = float_round(
+ self.secondary_uom_qty * factor,
+ precision_rounding=self.product_uom.rounding
+ )
+ if float_compare(
+ self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding
+ ) != 0:
+ self.product_uom_qty = qty
+
+ @api.onchange('product_uom_qty')
+ def onchange_secondary_unit_product_uom_qty(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.secondary_uom_id.factor * self.product_uom.factor
+
+ qty = float_round(
+ self.product_uom_qty / (factor or 1.0),
+ precision_rounding=self.secondary_uom_id.uom_id.rounding
+ )
+ if float_compare(
+ self.secondary_uom_qty,
+ qty,
+ precision_rounding=self.secondary_uom_id.uom_id.rounding
+ ) != 0:
+ self.secondary_uom_qty = qty
+
+ @api.onchange('product_uom')
+ def onchange_product_uom_for_secondary(self):
+ if not self.secondary_uom_id:
+ return
+ factor = self.product_uom.factor * self.secondary_uom_id.factor
+ qty = float_round(
+ self.product_uom_qty / (factor or 1.0),
+ precision_rounding=self.product_uom.rounding
+ )
+ if float_compare(
+ self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding
+ ) != 0:
+ self.secondary_uom_qty = qty
+
class StockMoveLine(models.Model):
_inherit = ['stock.move.line', 'stock.secondary.unit.mixin']
| {"golden_diff": "diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py\n--- a/stock_secondary_unit/models/stock_move.py\n+++ b/stock_secondary_unit/models/stock_move.py\n@@ -2,7 +2,7 @@\n # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\n from odoo import api, fields, models\n from odoo.addons import decimal_precision as dp\n-from odoo.tools.float_utils import float_round\n+from odoo.tools.float_utils import float_compare, float_round\n \n \n class StockSecondaryUnitMixin(models.AbstractModel):\n@@ -28,6 +28,52 @@\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n \n+ @api.onchange('secondary_uom_id', 'secondary_uom_qty')\n+ def onchange_secondary_uom(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.secondary_uom_id.factor * self.product_uom.factor\n+\n+ qty = float_round(\n+ self.secondary_uom_qty * factor,\n+ precision_rounding=self.product_uom.rounding\n+ )\n+ if float_compare(\n+ self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding\n+ ) != 0:\n+ self.product_uom_qty = qty\n+\n+ @api.onchange('product_uom_qty')\n+ def onchange_secondary_unit_product_uom_qty(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.secondary_uom_id.factor * self.product_uom.factor\n+\n+ qty = float_round(\n+ self.product_uom_qty / (factor or 1.0),\n+ precision_rounding=self.secondary_uom_id.uom_id.rounding\n+ )\n+ if float_compare(\n+ self.secondary_uom_qty,\n+ qty,\n+ precision_rounding=self.secondary_uom_id.uom_id.rounding\n+ ) != 0:\n+ self.secondary_uom_qty = qty\n+\n+ @api.onchange('product_uom')\n+ def onchange_product_uom_for_secondary(self):\n+ if not self.secondary_uom_id:\n+ return\n+ factor = self.product_uom.factor * self.secondary_uom_id.factor\n+ qty = float_round(\n+ self.product_uom_qty / (factor or 1.0),\n+ precision_rounding=self.product_uom.rounding\n+ )\n+ if float_compare(\n+ self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding\n+ ) != 0:\n+ self.secondary_uom_qty = qty\n+\n \n class StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n", "issue": "[12.0] stock_secondary_unit \"secondary qty\" value in picking not affecting \"initial demand\"\nAFFECTED VERSIONS\r\n\r\n12.0 (it works ok on 13.0)\r\n\r\nSTEPS TO REPRODUCE\r\n\r\nActivate Units of Measure in general settings > inventory\r\n\r\nInventory > Master Data > Products > Set a secondary unit in a product\r\n\r\n\r\n\r\nCreate a new transfer > add product > select secondary UoM > input secondary qty\r\n\r\n\r\n\r\nCURRENT BEHAVIOR\r\n\r\nValue \"Initial demand\" is not affected by secondary qty input\r\nIn the same way, if \"Initial demand\" is set, \"secondary qty\" does not change - basically there is no relation between the two fields\r\n\r\n\r\n\r\nREQUIRED BEHAVIOR \r\n\r\nWhen \"secondary qty\" is updated, also \"initial demand\" should update - and viceversa, as it happens in SO with secondary unit modules\r\n\r\nVIDEO\r\n\r\nhttps://recordit.co/zcuDUx6xco\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Tecnativa - Sergio Teruel\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nfrom odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools.float_utils import float_round\n\n\nclass StockSecondaryUnitMixin(models.AbstractModel):\n _name = 'stock.secondary.unit.mixin'\n _description = 'Stock Secondary Unit Mixin'\n\n secondary_uom_id = fields.Many2one(\n 
comodel_name='product.secondary.unit',\n string='Second unit',\n )\n secondary_uom_qty = fields.Float(\n string='Secondary Qty',\n digits=dp.get_precision('Product Unit of Measure'),\n )\n\n\nclass StockMove(models.Model):\n _inherit = ['stock.move', 'stock.secondary.unit.mixin']\n _name = 'stock.move'\n\n def _merge_moves_fields(self):\n res = super(StockMove, self)._merge_moves_fields()\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n\n\nclass StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n _name = 'stock.move.line'\n\n @api.model\n def create(self, vals):\n move = self.env['stock.move'].browse(vals.get('move_id', False))\n if move.secondary_uom_id:\n uom = self.env['uom.uom'].browse(vals['product_uom_id'])\n factor = move.secondary_uom_id.factor * uom.factor\n move_line_qty = vals.get(\n 'product_uom_qty', vals.get('qty_done', 0.0))\n qty = float_round(\n move_line_qty / (factor or 1.0),\n precision_rounding=move.secondary_uom_id.uom_id.rounding\n )\n vals.update({\n 'secondary_uom_qty': qty,\n 'secondary_uom_id': move.secondary_uom_id.id,\n })\n return super().create(vals)\n", "path": "stock_secondary_unit/models/stock_move.py"}]} | 1,457 | 612 |
gh_patches_debug_11603 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate tests from unittest to pytest
Our existing test suite has been written with the Python [`unittest`](https://docs.python.org/3/library/unittest.html) framework. However, as the test suite has grown and opsdroid has become more complex, we are running into issues with the tests, mainly around setting up and tearing down tests.
The @opsdroid/maintainers team have decided that we want to migrate all tests to be written with the [`pytest`](https://docs.pytest.org/en/latest/contents.html) framework instead so that we can make better use of fixtures. Fixtures are more reusable and portable and should help reduce complexity all over.
There's a lot to be done but it can be done piece by piece as `pytest` can run tests in either format. So if you wish to help in the effort you can start by searching the codebase for unittest suites. These are classes which are subclassed from `unittest.TestCase` or `asynctest.TestCase`, so searching all files for `unittest.TestCase` and `asynctest.TestCase` should be a good place to start.
For detailed information on running the test suite and contributing to opsdroid [see the docs](https://docs.opsdroid.dev/en/latest/contributing/index.html). But the quickest way to get started is with [`tox`](https://tox.readthedocs.io/en/latest/).
```bash
pip install -U tox # You only need to install tox once
tox -e py36,lint # Run the Python 3.6 tests (the lowest version we support) and the linter
```
Once you have found a test suite you wish to convert, there are a few steps you need to follow to convert from unittest to pytest:
- Move tests from top level `tests` directory to a nested `tests` directory in opsdroid. Create one in an appropriate place if there isn't already one.
- Remove test from class, pytest tests are just regular functions.
- Change assertions to use regular `assert` or [pytest assertions](https://docs.pytest.org/en/latest/assert.html).
- Mark async tests. In unittest we write async tests by using the `asynctest.TestCase`, but in pytest we decorate our tests with `@pytest.mark.asyncio` instead.
- Move setup operations to fixtures. If a test class contains a `setUp` method anything created here should become a fixture. Check the existing fixtures in `conftest.py` before creating new ones.
- Add docstrings to tests and fixtures to explain what they do. We have been pretty rubbish with this up until now and there are many tests which are not obvious in what they are testing.
Here's an example:
```python
# Before (unittest)
import asynctest
import asynctest.mock as mock
from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message
from opsdroid.matchers import match_regex
from opsdroid import constraints
class TestConstraints(asynctest.TestCase):
"""Test the opsdroid constraint decorators."""
async def setUp(self):
configure_lang({})
async def getMockSkill(self):
async def mockedskill(opsdroid, config, message):
pass
mockedskill.config = {}
return mockedskill
async def test_constrain_rooms_constrains(self):
with OpsDroid() as opsdroid:
skill = await self.getMockSkill()
skill = match_regex(r".*")(skill)
skill = constraints.constrain_rooms(["#general"])(skill)
opsdroid.skills.append(skill)
tasks = await opsdroid.parse(
Message(text="Hello", user="user", target="#random", connector=None)
)
self.assertEqual(len(tasks), 2) # Just match_always and match_event
```
```python
# After (pytest)
import pytest
from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message
from opsdroid.matchers import match_regex
from opsdroid import constraints
configure_lang({}) # Required for our internationalization of error messages
@pytest.fixture
def opsdroid():
"""An instance of the OpsDroid class."""
with OpsDroid() as opsdroid:
yield opsdroid
@pytest.fixture
def mock_skill():
"""A skill which does nothing but follows the skill API."""
async def mockedskill(opsdroid, config, message):
pass
mockedskill.config = {}
return mockedskill
@pytest.mark.asyncio
async def test_constrain_rooms_constrains(opsdroid, mock_skill):
"""Test that with the room constraint a skill is not called."""
skill = match_regex(r".*")(mock_skill)
skill = constraints.constrain_rooms(["#general"])(skill)
opsdroid.skills.append(skill)
tasks = await opsdroid.parse(
Message(text="Hello", user="user", target="#random", connector=None)
)
assert len(tasks) == 2 # Just match_always and match_event
```
Fixtures such as the `opsdroid` fixture will be extremely useful in many tests, so we will be creating some standard ones like this which will be available in all tests. If you write a fixture that you think could be useful in other places please don't hesitate to contribute it to the `conftest.py` file.
If you have any questions you can chat to us on [Matrix](https://riot.im/app/#/room/#opsdroid-general:matrix.org). We encourage you to get started and if you have issues or get stuck then open a [draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) with your changes and we can take a look.
---
When this issue is complete the documentation about the migration should be removed from the testing docs.
</issue>
<code>
[start of opsdroid/conftest.py]
1 """Pytest config for all opsdroid tests."""
2 from opsdroid.testing import opsdroid
3
4 from opsdroid.cli.start import configure_lang
5
6 __all__ = ["opsdroid"]
7
8 configure_lang({})
9
[end of opsdroid/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/conftest.py b/opsdroid/conftest.py
--- a/opsdroid/conftest.py
+++ b/opsdroid/conftest.py
@@ -1,8 +1,29 @@
"""Pytest config for all opsdroid tests."""
+import pytest
+
+import asyncio
+
from opsdroid.testing import opsdroid
+from opsdroid.connector import Connector
from opsdroid.cli.start import configure_lang
__all__ = ["opsdroid"]
configure_lang({})
+
+
[email protected](scope="session")
+def get_connector():
+ def _get_connector(config={}):
+ return Connector(config, opsdroid=opsdroid)
+
+ return _get_connector
+
+
[email protected]_fixture
+def event_loop():
+ """Create an instance of the default event loop for each test case."""
+ loop = asyncio.get_event_loop_policy().new_event_loop()
+ yield loop
+ loop.close()
| {"golden_diff": "diff --git a/opsdroid/conftest.py b/opsdroid/conftest.py\n--- a/opsdroid/conftest.py\n+++ b/opsdroid/conftest.py\n@@ -1,8 +1,29 @@\n \"\"\"Pytest config for all opsdroid tests.\"\"\"\n+import pytest\n+\n+import asyncio\n+\n from opsdroid.testing import opsdroid\n+from opsdroid.connector import Connector\n \n from opsdroid.cli.start import configure_lang\n \n __all__ = [\"opsdroid\"]\n \n configure_lang({})\n+\n+\[email protected](scope=\"session\")\n+def get_connector():\n+ def _get_connector(config={}):\n+ return Connector(config, opsdroid=opsdroid)\n+\n+ return _get_connector\n+\n+\[email protected]_fixture\n+def event_loop():\n+ \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n+ loop = asyncio.get_event_loop_policy().new_event_loop()\n+ yield loop\n+ loop.close()\n", "issue": "Migrate tests from unittest to pytest\nOur existing test suite has been written with the Python [`unittest`](https://docs.python.org/3/library/unittest.html) framework. However, as the test suite has grown and opsdroid has become more complex we are running into issues with the tests. Mainly around setting up and tearing down tests.\r\n\r\nThe @opsdroid/maintainers team have decided that we want to migrate all tests to be written with the [`pytest`](https://docs.pytest.org/en/latest/contents.html) framework instead so that we can make better use of fixtures. Fixtures are more reusable and portable and should help reduce complexity all over.\r\n\r\nThere's a lot to be done but it can be done piece by piece as `pytest` can run tests in either format. So if you wish to help in the effort you can start by searching the codebase for unittest suites. These are classes which are subclassed from `unittest.TestCase` or `asynctest.TestCase`, so searching all files for `unittest.TestCase` and `asynctest.TestCase` should be a good place to start.\r\n\r\nFor detailed information on running the test suite and contributing to opsdroid [see the docs](https://docs.opsdroid.dev/en/latest/contributing/index.html). But the quickest way to get started us with [`tox`](https://tox.readthedocs.io/en/latest/).\r\n\r\n```bash\r\npip install -U tox # You only need to install tox once\r\n\r\ntox -e py36,lint # Run the Python 3.6 tests (the lowest version we support) and the linter\r\n```\r\n\r\nOnce you have found a test suite you wish to convert there are a few steps you need to follow to convert from unittest to pytest:\r\n- Move tests from top level `tests` directory to a nested `tests` directory in opsdroid. Create one in an appropriate place if there isn't already one.\r\n- Remove test from class, pytest tests are just regular functions.\r\n- Change assertions to use regular `assert` or [pytest assertions](https://docs.pytest.org/en/latest/assert.html).\r\n- Mark async tests. In unittest we write async tests by using the `asynctest.TestCase`, but in pytest we decorate our tests with `@pytest.mark.asyncio` instead.\r\n- Move setup operations to fixtures. If a test class contains a `setUp` method anything created here should become a fixture. Check the existing fixtures in `conftest.py` before creating new ones.\r\n- Add docstrings to tests and fixtures to explain what they do. 
We have been pretty rubbish with this up until now and there are many tests which are not obvious in what they are testing.\r\n\r\nHere's an example:\r\n\r\n```python\r\n# Before (unittest)\r\nimport asynctest\r\nimport asynctest.mock as mock\r\n\r\nfrom opsdroid.cli.start import configure_lang\r\nfrom opsdroid.core import OpsDroid\r\nfrom opsdroid.events import Message\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid import constraints\r\n\r\n\r\nclass TestConstraints(asynctest.TestCase):\r\n \"\"\"Test the opsdroid constraint decorators.\"\"\"\r\n\r\n async def setUp(self):\r\n configure_lang({})\r\n\r\n async def getMockSkill(self):\r\n async def mockedskill(opsdroid, config, message):\r\n pass\r\n\r\n mockedskill.config = {}\r\n return mockedskill\r\n\r\n async def test_constrain_rooms_constrains(self):\r\n with OpsDroid() as opsdroid:\r\n skill = await self.getMockSkill()\r\n skill = match_regex(r\".*\")(skill)\r\n skill = constraints.constrain_rooms([\"#general\"])(skill)\r\n opsdroid.skills.append(skill)\r\n\r\n tasks = await opsdroid.parse(\r\n Message(text=\"Hello\", user=\"user\", target=\"#random\", connector=None)\r\n )\r\n self.assertEqual(len(tasks), 2) # Just match_always and match_event\r\n```\r\n\r\n```python\r\n# After (pytest)\r\nimport pytest\r\n\r\nfrom opsdroid.cli.start import configure_lang\r\nfrom opsdroid.core import OpsDroid\r\nfrom opsdroid.events import Message\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid import constraints\r\n\r\n\r\nconfigure_lang({}) # Required for our internationalization of error messages\r\n\r\n\r\[email protected]\r\ndef opsdroid():\r\n \"\"\"An instance of the OpsDroid class.\"\"\"\r\n with OpsDroid() as opsdroid:\r\n yield opsdroid\r\n\r\n\r\[email protected]\r\ndef mock_skill():\r\n \"\"\"A skill which does nothing but follows the skill API.\"\"\"\r\n\r\n async def mockedskill(opsdroid, config, message):\r\n pass\r\n\r\n mockedskill.config = {}\r\n return mockedskill\r\n\r\n\r\[email protected]\r\nasync def test_constrain_rooms_constrains(opsdroid, mock_skill):\r\n \"\"\"Test that with the room constraint a skill is not called.\"\"\"\r\n skill = match_regex(r\".*\")(mock_skill)\r\n skill = constraints.constrain_rooms([\"#general\"])(skill)\r\n opsdroid.skills.append(skill)\r\n\r\n tasks = await opsdroid.parse(\r\n Message(text=\"Hello\", user=\"user\", target=\"#random\", connector=None)\r\n )\r\n assert len(tasks) == 2 # Just match_always and match_event\r\n```\r\n\r\nFixtures such as the `opsdroid` fixture will be extremely useful in many tests, so we will be creating some standard ones like this which will be available in all tests. If you write a fixture that you think could be useful in other places please don't hesitate to contribute it to the `conftest.py` file.\r\n\r\nIf you have any questions you can chat to us on [Matrix](https://riot.im/app/#/room/#opsdroid-general:matrix.org). We encourage you to get started and if you have issues or get stuck then open a [draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) with your changes and we can take a look.\r\n\r\n---\r\n\r\nWhen this issue is complete the documentation about the migration should be removed from the testing docs.\n", "before_files": [{"content": "\"\"\"Pytest config for all opsdroid tests.\"\"\"\nfrom opsdroid.testing import opsdroid\n\nfrom opsdroid.cli.start import configure_lang\n\n__all__ = [\"opsdroid\"]\n\nconfigure_lang({})\n", "path": "opsdroid/conftest.py"}]} | 1,849 | 219 |
gh_patches_debug_38667 | rasdani/github-patches | git_diff | encode__uvicorn-22 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Signal handlers
In the worker class
</issue>
<code>
[start of uvicorn/worker.py]
1 import asyncio
2 import functools
3
4 import uvloop
5
6 from gunicorn.workers.base import Worker
7 from uvicorn.protocols import http
8
9
10 class UvicornWorker(Worker):
11 """
12 A worker class for Gunicorn that interfaces with an ASGI consumer callable,
13 rather than a WSGI callable.
14
15 We use a couple of packages from MagicStack in order to achieve an
16 extremely high-throughput and low-latency implementation:
17
18 * `uvloop` as the event loop policy.
19 * `httptools` as the HTTP request parser.
20 """
21
22 def init_process(self):
23 # Close any existing event loop before setting a
24 # new policy.
25 asyncio.get_event_loop().close()
26
27 # Setup uvloop policy, so that every
28 # asyncio.get_event_loop() will create an instance
29 # of uvloop event loop.
30 asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
31
32 super().init_process()
33
34 def run(self):
35 loop = asyncio.get_event_loop()
36 loop.create_task(self.create_servers(loop))
37 loop.create_task(tick(loop, self.notify))
38 loop.run_forever()
39
40 async def create_servers(self, loop):
41 cfg = self.cfg
42 consumer = self.wsgi
43
44 for sock in self.sockets:
45 protocol = functools.partial(
46 http.HttpProtocol,
47 consumer=consumer, loop=loop, sock=sock, cfg=cfg
48 )
49 await loop.create_server(protocol, sock=sock)
50
51
52 async def tick(loop, notify):
53 cycle = 0
54 while True:
55 http.set_time_and_date()
56 cycle = (cycle + 1) % 10
57 if cycle == 0:
58 notify()
59 await asyncio.sleep(1)
60
[end of uvicorn/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/worker.py b/uvicorn/worker.py
--- a/uvicorn/worker.py
+++ b/uvicorn/worker.py
@@ -1,5 +1,7 @@
import asyncio
import functools
+import signal
+import sys
import uvloop
@@ -19,6 +21,11 @@
* `httptools` as the HTTP request parser.
"""
+ def __init__(self, *args, **kwargs): # pragma: no cover
+ super().__init__(*args, **kwargs)
+ self.servers = []
+ self.exit_code = 0
+
def init_process(self):
# Close any existing event loop before setting a
# new policy.
@@ -34,8 +41,45 @@
def run(self):
loop = asyncio.get_event_loop()
loop.create_task(self.create_servers(loop))
- loop.create_task(tick(loop, self.notify))
+ loop.create_task(self.tick(loop))
loop.run_forever()
+ sys.exit(self.exit_code)
+
+ def init_signals(self):
+ # Set up signals through the event loop API.
+ loop = asyncio.get_event_loop()
+
+ loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
+ signal.SIGQUIT, None)
+
+ loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
+ signal.SIGTERM, None)
+
+ loop.add_signal_handler(signal.SIGINT, self.handle_quit,
+ signal.SIGINT, None)
+
+ loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
+ signal.SIGWINCH, None)
+
+ loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
+ signal.SIGUSR1, None)
+
+ loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
+ signal.SIGABRT, None)
+
+ # Don't let SIGTERM and SIGUSR1 disturb active requests
+ # by interrupting system calls
+ signal.siginterrupt(signal.SIGTERM, False)
+ signal.siginterrupt(signal.SIGUSR1, False)
+
+ def handle_quit(self, sig, frame):
+ self.alive = False
+ self.cfg.worker_int(self)
+
+ def handle_abort(self, sig, frame):
+ self.alive = False
+ self.exit_code = 1
+ self.cfg.worker_abort(self)
async def create_servers(self, loop):
cfg = self.cfg
@@ -46,14 +90,19 @@
http.HttpProtocol,
consumer=consumer, loop=loop, sock=sock, cfg=cfg
)
- await loop.create_server(protocol, sock=sock)
+ server = await loop.create_server(protocol, sock=sock)
+ self.servers.append(server)
+ async def tick(self, loop):
+ cycle = 0
+ while self.alive:
+ http.set_time_and_date()
+ cycle = (cycle + 1) % 10
+ if cycle == 0:
+ self.notify()
+ await asyncio.sleep(1)
-async def tick(loop, notify):
- cycle = 0
- while True:
- http.set_time_and_date()
- cycle = (cycle + 1) % 10
- if cycle == 0:
- notify()
- await asyncio.sleep(1)
+ for server in self.servers:
+ server.close()
+ await server.wait_closed()
+ loop.stop()
| {"golden_diff": "diff --git a/uvicorn/worker.py b/uvicorn/worker.py\n--- a/uvicorn/worker.py\n+++ b/uvicorn/worker.py\n@@ -1,5 +1,7 @@\n import asyncio\n import functools\n+import signal\n+import sys\n \n import uvloop\n \n@@ -19,6 +21,11 @@\n * `httptools` as the HTTP request parser.\n \"\"\"\n \n+ def __init__(self, *args, **kwargs): # pragma: no cover\n+ super().__init__(*args, **kwargs)\n+ self.servers = []\n+ self.exit_code = 0\n+\n def init_process(self):\n # Close any existing event loop before setting a\n # new policy.\n@@ -34,8 +41,45 @@\n def run(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self.create_servers(loop))\n- loop.create_task(tick(loop, self.notify))\n+ loop.create_task(self.tick(loop))\n loop.run_forever()\n+ sys.exit(self.exit_code)\n+\n+ def init_signals(self):\n+ # Set up signals through the event loop API.\n+ loop = asyncio.get_event_loop()\n+\n+ loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,\n+ signal.SIGQUIT, None)\n+\n+ loop.add_signal_handler(signal.SIGTERM, self.handle_exit,\n+ signal.SIGTERM, None)\n+\n+ loop.add_signal_handler(signal.SIGINT, self.handle_quit,\n+ signal.SIGINT, None)\n+\n+ loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,\n+ signal.SIGWINCH, None)\n+\n+ loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,\n+ signal.SIGUSR1, None)\n+\n+ loop.add_signal_handler(signal.SIGABRT, self.handle_abort,\n+ signal.SIGABRT, None)\n+\n+ # Don't let SIGTERM and SIGUSR1 disturb active requests\n+ # by interrupting system calls\n+ signal.siginterrupt(signal.SIGTERM, False)\n+ signal.siginterrupt(signal.SIGUSR1, False)\n+\n+ def handle_quit(self, sig, frame):\n+ self.alive = False\n+ self.cfg.worker_int(self)\n+\n+ def handle_abort(self, sig, frame):\n+ self.alive = False\n+ self.exit_code = 1\n+ self.cfg.worker_abort(self)\n \n async def create_servers(self, loop):\n cfg = self.cfg\n@@ -46,14 +90,19 @@\n http.HttpProtocol,\n consumer=consumer, loop=loop, sock=sock, cfg=cfg\n )\n- await loop.create_server(protocol, sock=sock)\n+ server = await loop.create_server(protocol, sock=sock)\n+ self.servers.append(server)\n \n+ async def tick(self, loop):\n+ cycle = 0\n+ while self.alive:\n+ http.set_time_and_date()\n+ cycle = (cycle + 1) % 10\n+ if cycle == 0:\n+ self.notify()\n+ await asyncio.sleep(1)\n \n-async def tick(loop, notify):\n- cycle = 0\n- while True:\n- http.set_time_and_date()\n- cycle = (cycle + 1) % 10\n- if cycle == 0:\n- notify()\n- await asyncio.sleep(1)\n+ for server in self.servers:\n+ server.close()\n+ await server.wait_closed()\n+ loop.stop()\n", "issue": "Signal handlers\nIn the worker class\n", "before_files": [{"content": "import asyncio\nimport functools\n\nimport uvloop\n\nfrom gunicorn.workers.base import Worker\nfrom uvicorn.protocols import http\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n\n We use a couple of packages from MagicStack in order to achieve an\n extremely high-throughput and low-latency implementation:\n\n * `uvloop` as the event loop policy.\n * `httptools` as the HTTP request parser.\n \"\"\"\n\n def init_process(self):\n # Close any existing event loop before setting a\n # new policy.\n asyncio.get_event_loop().close()\n\n # Setup uvloop policy, so that every\n # asyncio.get_event_loop() will create an instance\n # of uvloop event loop.\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n super().init_process()\n\n def run(self):\n loop = 
asyncio.get_event_loop()\n loop.create_task(self.create_servers(loop))\n loop.create_task(tick(loop, self.notify))\n loop.run_forever()\n\n async def create_servers(self, loop):\n cfg = self.cfg\n consumer = self.wsgi\n\n for sock in self.sockets:\n protocol = functools.partial(\n http.HttpProtocol,\n consumer=consumer, loop=loop, sock=sock, cfg=cfg\n )\n await loop.create_server(protocol, sock=sock)\n\n\nasync def tick(loop, notify):\n cycle = 0\n while True:\n http.set_time_and_date()\n cycle = (cycle + 1) % 10\n if cycle == 0:\n notify()\n await asyncio.sleep(1)\n", "path": "uvicorn/worker.py"}]} | 1,030 | 767 |
gh_patches_debug_2047 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] aws-c-event-stream/0.2.7: conflicting openssl versions
```
ERROR: Conflict in s2n/1.0.11:
's2n/1.0.11' requires 'openssl/1.1.1k' while 'aws-c-cal/0.5.11' requires 'openssl/1.1.1l'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
seems like it was introduced by #7260
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **aws-c-event-stream/0.2.7**
* Conan version: **conan 1.39.0**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++
compiler.version=7
os=Linux
os_build=Linux
[options]
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
conan install --build missing aws-c-event-stream/0.2.7@
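
### Possible workaround (untested sketch)

Until the recipes are aligned, the error message suggests overriding openssl from the root package. A hypothetical root conanfile sketch, assuming Conan 1.x accepts `override=True` in `requirements()` (package names and versions taken from the error above):

```python
# Hypothetical consumer conanfile (Conan 1.x syntax assumed): force a single
# openssl version across the graph so s2n and aws-c-cal no longer conflict.
from conans import ConanFile


class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def requirements(self):
        self.requires("aws-c-event-stream/0.2.7")
        # Overrides the openssl requirement pulled in transitively by s2n
        # and aws-c-cal.
        self.requires("openssl/1.1.1l", override=True)
```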
</issue>
<code>
[start of recipes/s2n/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5 required_conan_version = ">=1.33.0"
6
7 class S2n(ConanFile):
8 name = "s2n"
9 description = "An implementation of the TLS/SSL protocols"
10 topics = ("conan", "aws", "amazon", "cloud", )
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/aws/s2n-tls"
13 license = "Apache-2.0",
14 exports_sources = "CMakeLists.txt"
15 generators = "cmake", "cmake_find_package"
16 settings = "os", "arch", "compiler", "build_type"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 }
21 default_options = {
22 "shared": False,
23 "fPIC": True,
24 }
25
26 _cmake = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def configure(self):
33 if self.options.shared:
34 del self.options.fPIC
35 del self.settings.compiler.cppstd
36 del self.settings.compiler.libcxx
37
38 def requirements(self):
39 self.requires("openssl/1.1.1k")
40
41 def source(self):
42 tools.get(**self.conan_data["sources"][self.version],
43 destination=self._source_subfolder, strip_root=True)
44
45 def validate(self):
46 if self.settings.os == "Windows":
47 raise ConanInvalidConfiguration("Not supported (yet)")
48
49 def _configure_cmake(self):
50 if self._cmake:
51 return self._cmake
52 self._cmake = CMake(self)
53 self._cmake.definitions["BUILD_TESTING"] = False
54 self._cmake.definitions["UNSAFE_TREAT_WARNINGS_AS_ERRORS"] = False
55 self._cmake.configure()
56 return self._cmake
57
58 def build(self):
59 cmake = self._configure_cmake()
60 cmake.build()
61
62 def package(self):
63 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
64 cmake = self._configure_cmake()
65 cmake.install()
66 tools.rmdir(os.path.join(self.package_folder, "lib", "s2n"))
67
68 def package_info(self):
69 self.cpp_info.filenames["cmake_find_package"] = "s2n"
70 self.cpp_info.filenames["cmake_find_package_multi"] = "s2n"
71 self.cpp_info.names["cmake_find_package"] = "AWS"
72 self.cpp_info.names["cmake_find_package_multi"] = "AWS"
73 self.cpp_info.components["s2n-lib"].names["cmake_find_package"] = "s2n"
74 self.cpp_info.components["s2n-lib"].names["cmake_find_package_multi"] = "s2n"
75 self.cpp_info.components["s2n-lib"].libs = ["s2n"]
76 self.cpp_info.components["s2n-lib"].requires = ["openssl::crypto"]
77 if self.settings.os in ("FreeBSD", "Linux"):
78 self.cpp_info.components["s2n-lib"].system_libs = ["m", "pthread"]
79
[end of recipes/s2n/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/s2n/all/conanfile.py b/recipes/s2n/all/conanfile.py
--- a/recipes/s2n/all/conanfile.py
+++ b/recipes/s2n/all/conanfile.py
@@ -36,7 +36,7 @@
del self.settings.compiler.libcxx
def requirements(self):
- self.requires("openssl/1.1.1k")
+ self.requires("openssl/1.1.1l")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
| {"golden_diff": "diff --git a/recipes/s2n/all/conanfile.py b/recipes/s2n/all/conanfile.py\n--- a/recipes/s2n/all/conanfile.py\n+++ b/recipes/s2n/all/conanfile.py\n@@ -36,7 +36,7 @@\n del self.settings.compiler.libcxx\n \n def requirements(self):\n- self.requires(\"openssl/1.1.1k\")\n+ self.requires(\"openssl/1.1.1l\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n", "issue": "[package] aws-c-event-stream/0.2.7: conflicting openssl versions\n```\r\nERROR: Conflict in s2n/1.0.11:\r\n 's2n/1.0.11' requires 'openssl/1.1.1k' while 'aws-c-cal/0.5.11' requires 'openssl/1.1.1l'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\nseems like it was introduced by #7260 \r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **aws-c-event-stream/0.2.7**\r\n * Conan version: **conan 1.39.0**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++\r\ncompiler.version=7\r\nos=Linux\r\nos_build=Linux\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nconan install --build missing aws-c-event-stream/0.2.7@\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\nclass S2n(ConanFile):\n name = \"s2n\"\n description = \"An implementation of the TLS/SSL protocols\"\n topics = (\"conan\", \"aws\", \"amazon\", \"cloud\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/aws/s2n-tls\"\n license = \"Apache-2.0\",\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def requirements(self):\n self.requires(\"openssl/1.1.1k\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Not supported (yet)\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"UNSAFE_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"s2n\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.names[\"cmake_find_package\"] = \"AWS\"\n 
self.cpp_info.names[\"cmake_find_package_multi\"] = \"AWS\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].libs = [\"s2n\"]\n self.cpp_info.components[\"s2n-lib\"].requires = [\"openssl::crypto\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.components[\"s2n-lib\"].system_libs = [\"m\", \"pthread\"]\n", "path": "recipes/s2n/all/conanfile.py"}]} | 1,682 | 126 |
gh_patches_debug_8659 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeError: Set changed size during iteration
Python Version: 3.7.7
Error:
```
RuntimeError: Set changed size during iteration
File "django/core/handlers/wsgi.py", line 141, in __call__
response = self.get_response(request)
File "ddtrace/contrib/django/patch.py", line 82, in wrapper
return func(mod, pin, wrapped, instance, args, kwargs)
File "ddtrace/contrib/django/patch.py", line 406, in traced_get_response
span_type=SpanTypes.HTTP,
File "ddtrace/tracer.py", line 638, in trace
span_type=span_type,
File "ddtrace/tracer.py", line 389, in start_span
new_ctx = self._check_new_process()
File "ddtrace/tracer.py", line 570, in _check_new_process
self._update_dogstatsd_constant_tags()
File "ddtrace/tracer.py", line 525, in _update_dogstatsd_constant_tags
for k, v in RuntimeTags()
File "ddtrace/tracer.py", line 524, in <listcomp>
'{}:{}'.format(k, v)
File "ddtrace/internal/runtime/runtime_metrics.py", line 29, in <genexpr>
collected = (collector.collect(self._enabled) for collector in self._collectors)
File "ddtrace/internal/runtime/collector.py", line 67, in collect
self.value = self.collect_fn(keys)
File "ddtrace/internal/runtime/tag_collectors.py", line 25, in collect_fn
tags = [(SERVICE, service) for service in ddtrace.tracer._services]
File "ddtrace/internal/runtime/tag_collectors.py", line 25, in <listcomp>
tags = [(SERVICE, service) for service in ddtrace.tracer._services]
```
### Which version of dd-trace-py are you using?
```
0.42.0
```
### Which version of the libraries are you using?
```
django==2.2.14
ddtrace==0.42.0
gunicorn==20.0.4
```
### How can we reproduce your problem?
It's not clear if there's anything specific about the WSGI requests that triggers this condition.
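A minimal standalone sketch (an illustration, not code from the application) of the same failure mode: the list comprehension in `tag_collectors.py` iterates `tracer._services` while another thread can still add to it, and mutating a set during iteration raises exactly this error:

```python
# Standalone illustration, no ddtrace involved: iterating a set while its
# size changes raises "RuntimeError: Set changed size during iteration".
services = {"web", "worker", "scheduler"}

try:
    for service in services:
        # Simulates another thread registering a new service mid-iteration.
        services.add(service + "-2")
except RuntimeError as exc:
    print(exc)  # Set changed size during iteration

# Taking a snapshot first avoids the race:
tags = [("service", service) for service in list(services)]
print(tags)
```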
### What is the result that you get?
-
### What is the result that you expected?
-
</issue>
<code>
[start of ddtrace/internal/runtime/tag_collectors.py]
1 from .collector import ValueCollector
2 from .constants import (
3 SERVICE,
4 LANG_INTERPRETER,
5 LANG_VERSION,
6 LANG,
7 TRACER_VERSION,
8 )
9 from ...constants import ENV_KEY
10
11
12 class RuntimeTagCollector(ValueCollector):
13 periodic = False
14 value = []
15
16
17 class TracerTagCollector(RuntimeTagCollector):
18 """Tag collector for the ddtrace Tracer"""
19
20 required_modules = ["ddtrace"]
21
22 def collect_fn(self, keys):
23 ddtrace = self.modules.get("ddtrace")
24 tags = [(SERVICE, service) for service in ddtrace.tracer._services]
25 if ENV_KEY in ddtrace.tracer.tags:
26 tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))
27 return tags
28
29
30 class PlatformTagCollector(RuntimeTagCollector):
31 """Tag collector for the Python interpreter implementation.
32
33 Tags collected:
34 - ``lang_interpreter``:
35
36 * For CPython this is 'CPython'.
37 * For Pypy this is ``PyPy``
38 * For Jython this is ``Jython``
39
40 - `lang_version``, eg ``2.7.10``
41 - ``lang`` e.g. ``Python``
42 - ``tracer_version`` e.g. ``0.29.0``
43
44 """
45
46 required_modules = ("platform", "ddtrace")
47
48 def collect_fn(self, keys):
49 platform = self.modules.get("platform")
50 ddtrace = self.modules.get("ddtrace")
51 tags = [
52 (LANG, "python"),
53 (LANG_INTERPRETER, platform.python_implementation()),
54 (LANG_VERSION, platform.python_version()),
55 (TRACER_VERSION, ddtrace.__version__),
56 ]
57 return tags
58
[end of ddtrace/internal/runtime/tag_collectors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/internal/runtime/tag_collectors.py b/ddtrace/internal/runtime/tag_collectors.py
--- a/ddtrace/internal/runtime/tag_collectors.py
+++ b/ddtrace/internal/runtime/tag_collectors.py
@@ -21,7 +21,8 @@
def collect_fn(self, keys):
ddtrace = self.modules.get("ddtrace")
- tags = [(SERVICE, service) for service in ddtrace.tracer._services]
+ # make sure to copy _services to avoid RuntimeError: Set changed size during iteration
+ tags = [(SERVICE, service) for service in list(ddtrace.tracer._services)]
if ENV_KEY in ddtrace.tracer.tags:
tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))
return tags
| {"golden_diff": "diff --git a/ddtrace/internal/runtime/tag_collectors.py b/ddtrace/internal/runtime/tag_collectors.py\n--- a/ddtrace/internal/runtime/tag_collectors.py\n+++ b/ddtrace/internal/runtime/tag_collectors.py\n@@ -21,7 +21,8 @@\n \n def collect_fn(self, keys):\n ddtrace = self.modules.get(\"ddtrace\")\n- tags = [(SERVICE, service) for service in ddtrace.tracer._services]\n+ # make sure to copy _services to avoid RuntimeError: Set changed size during iteration\n+ tags = [(SERVICE, service) for service in list(ddtrace.tracer._services)]\n if ENV_KEY in ddtrace.tracer.tags:\n tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))\n return tags\n", "issue": "RuntimeError: Set changed size during iteration\nPython Version: 3.7.7\r\n\r\nError:\r\n```\r\nRuntimeError: Set changed size during iteration\r\n File \"django/core/handlers/wsgi.py\", line 141, in __call__\r\n response = self.get_response(request)\r\n File \"ddtrace/contrib/django/patch.py\", line 82, in wrapper\r\n return func(mod, pin, wrapped, instance, args, kwargs)\r\n File \"ddtrace/contrib/django/patch.py\", line 406, in traced_get_response\r\n span_type=SpanTypes.HTTP,\r\n File \"ddtrace/tracer.py\", line 638, in trace\r\n span_type=span_type,\r\n File \"ddtrace/tracer.py\", line 389, in start_span\r\n new_ctx = self._check_new_process()\r\n File \"ddtrace/tracer.py\", line 570, in _check_new_process\r\n self._update_dogstatsd_constant_tags()\r\n File \"ddtrace/tracer.py\", line 525, in _update_dogstatsd_constant_tags\r\n for k, v in RuntimeTags()\r\n File \"ddtrace/tracer.py\", line 524, in <listcomp>\r\n '{}:{}'.format(k, v)\r\n File \"ddtrace/internal/runtime/runtime_metrics.py\", line 29, in <genexpr>\r\n collected = (collector.collect(self._enabled) for collector in self._collectors)\r\n File \"ddtrace/internal/runtime/collector.py\", line 67, in collect\r\n self.value = self.collect_fn(keys)\r\n File \"ddtrace/internal/runtime/tag_collectors.py\", line 25, in collect_fn\r\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\r\n File \"ddtrace/internal/runtime/tag_collectors.py\", line 25, in <listcomp>\r\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\n```\r\n0.42.0\r\n```\r\n\r\n### Which version of the libraries are you using?\r\n```\r\ndjango==2.2.14\r\nddtrace==0.42.0\r\ngunicorn==20.0.4\r\n```\r\n\r\n### How can we reproduce your problem?\r\nIt's not clear if there's anything specific about the WSGI requests that triggers this condition. 
\r\n\r\n### What is the result that you get?\r\n-\r\n\r\n### What is the result that you expected?\r\n-\n", "before_files": [{"content": "from .collector import ValueCollector\nfrom .constants import (\n SERVICE,\n LANG_INTERPRETER,\n LANG_VERSION,\n LANG,\n TRACER_VERSION,\n)\nfrom ...constants import ENV_KEY\n\n\nclass RuntimeTagCollector(ValueCollector):\n periodic = False\n value = []\n\n\nclass TracerTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the ddtrace Tracer\"\"\"\n\n required_modules = [\"ddtrace\"]\n\n def collect_fn(self, keys):\n ddtrace = self.modules.get(\"ddtrace\")\n tags = [(SERVICE, service) for service in ddtrace.tracer._services]\n if ENV_KEY in ddtrace.tracer.tags:\n tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY]))\n return tags\n\n\nclass PlatformTagCollector(RuntimeTagCollector):\n \"\"\"Tag collector for the Python interpreter implementation.\n\n Tags collected:\n - ``lang_interpreter``:\n\n * For CPython this is 'CPython'.\n * For Pypy this is ``PyPy``\n * For Jython this is ``Jython``\n\n - `lang_version``, eg ``2.7.10``\n - ``lang`` e.g. ``Python``\n - ``tracer_version`` e.g. ``0.29.0``\n\n \"\"\"\n\n required_modules = (\"platform\", \"ddtrace\")\n\n def collect_fn(self, keys):\n platform = self.modules.get(\"platform\")\n ddtrace = self.modules.get(\"ddtrace\")\n tags = [\n (LANG, \"python\"),\n (LANG_INTERPRETER, platform.python_implementation()),\n (LANG_VERSION, platform.python_version()),\n (TRACER_VERSION, ddtrace.__version__),\n ]\n return tags\n", "path": "ddtrace/internal/runtime/tag_collectors.py"}]} | 1,560 | 166 |
gh_patches_debug_39127 | rasdani/github-patches | git_diff | ietf-tools__datatracker-4309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
session_purpose_demo management command no longer needed
### Description
When the session purpose project was in development, we added a management command `session_purpose_demo` to add a fake meeting that exercised the new features. Since the session purposes are now in active use, I think that management command can be pruned.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/meeting/management/commands/session_purpose_demo.py]
1 import datetime
2 import random
3
4 from django.core.management.base import BaseCommand, CommandError
5
6 from ietf.group.models import Group
7 from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory
8 from ietf.meeting.helpers import get_meeting
9 from ietf.meeting.models import Room, Session
10 from ietf.name.models import SessionPurposeName
11
12
13 class Command(BaseCommand):
14 help = 'Set up a demo of the session purpose updates'
15
16 DEMO_PREFIX='PDemo' # used to identify things added by this command
17
18 def add_arguments(self, parser):
19 parser.add_argument('--remove', action='store_true')
20
21 def handle(self, *args, **options):
22 if options['remove']:
23 self.remove_demo()
24 else:
25 self.install_demo()
26
27 def remove_demo(self):
28 self.stdout.write(f'Removing rooms with "{self.DEMO_PREFIX}" name prefix...\n')
29 Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
30 self.stdout.write(f'Removing sessions with "{self.DEMO_PREFIX}" name prefix...\n')
31 Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
32
33 def install_demo(self):
34 # get meeting
35 try:
36 meeting = get_meeting(days=14) # matches how secr app finds meetings
37 except:
38 raise CommandError('No upcoming meeting to modify')
39
40 # create rooms
41 self.stdout.write('Creating rooms...\n')
42 rooms = [
43 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),
44 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),
45 RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),
46 ]
47
48 # get all the timeslot types used by a session purpose
49 type_ids = set()
50 for purpose in SessionPurposeName.objects.filter(used=True):
51 type_ids.update(purpose.timeslot_types)
52
53 # set up timeslots
54 self.stdout.write('Creating timeslots...\n')
55 for room in rooms:
56 for day in range(meeting.days):
57 date = meeting.get_meeting_date(day)
58 for n, type_id in enumerate(type_ids):
59 TimeSlotFactory(
60 type_id=type_id,
61 meeting=meeting,
62 location=room,
63 time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),
64 duration=datetime.timedelta(hours=1),
65 )
66
67 # set up sessions
68 self.stdout.write('Creating sessions...')
69 groups_for_session_purpose = {
70 purpose.slug: list(
71 Group.objects.filter(
72 type__features__session_purposes__contains=f'"{purpose.slug}"',
73 state_id='active',
74 )
75 )
76 for purpose in SessionPurposeName.objects.filter(used=True)
77 }
78 for purpose in SessionPurposeName.objects.filter(used=True):
79 for type_id in purpose.timeslot_types:
80 group=random.choice(groups_for_session_purpose[purpose.slug])
81 SessionFactory(
82 meeting=meeting,
83 purpose=purpose,
84 type_id=type_id,
85 group=group,
86 name=f'{self.DEMO_PREFIX} for {group.acronym}',
87 status_id='schedw',
88 add_to_schedule=False,
89 )
90
91 self.stdout.write(f'\nRooms and sessions created with "{self.DEMO_PREFIX}" as name prefix\n')
[end of ietf/meeting/management/commands/session_purpose_demo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/meeting/management/commands/session_purpose_demo.py b/ietf/meeting/management/commands/session_purpose_demo.py
deleted file mode 100644
--- a/ietf/meeting/management/commands/session_purpose_demo.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import datetime
-import random
-
-from django.core.management.base import BaseCommand, CommandError
-
-from ietf.group.models import Group
-from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory
-from ietf.meeting.helpers import get_meeting
-from ietf.meeting.models import Room, Session
-from ietf.name.models import SessionPurposeName
-
-
-class Command(BaseCommand):
- help = 'Set up a demo of the session purpose updates'
-
- DEMO_PREFIX='PDemo' # used to identify things added by this command
-
- def add_arguments(self, parser):
- parser.add_argument('--remove', action='store_true')
-
- def handle(self, *args, **options):
- if options['remove']:
- self.remove_demo()
- else:
- self.install_demo()
-
- def remove_demo(self):
- self.stdout.write(f'Removing rooms with "{self.DEMO_PREFIX}" name prefix...\n')
- Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
- self.stdout.write(f'Removing sessions with "{self.DEMO_PREFIX}" name prefix...\n')
- Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()
-
- def install_demo(self):
- # get meeting
- try:
- meeting = get_meeting(days=14) # matches how secr app finds meetings
- except:
- raise CommandError('No upcoming meeting to modify')
-
- # create rooms
- self.stdout.write('Creating rooms...\n')
- rooms = [
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),
- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),
- ]
-
- # get all the timeslot types used by a session purpose
- type_ids = set()
- for purpose in SessionPurposeName.objects.filter(used=True):
- type_ids.update(purpose.timeslot_types)
-
- # set up timeslots
- self.stdout.write('Creating timeslots...\n')
- for room in rooms:
- for day in range(meeting.days):
- date = meeting.get_meeting_date(day)
- for n, type_id in enumerate(type_ids):
- TimeSlotFactory(
- type_id=type_id,
- meeting=meeting,
- location=room,
- time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),
- duration=datetime.timedelta(hours=1),
- )
-
- # set up sessions
- self.stdout.write('Creating sessions...')
- groups_for_session_purpose = {
- purpose.slug: list(
- Group.objects.filter(
- type__features__session_purposes__contains=f'"{purpose.slug}"',
- state_id='active',
- )
- )
- for purpose in SessionPurposeName.objects.filter(used=True)
- }
- for purpose in SessionPurposeName.objects.filter(used=True):
- for type_id in purpose.timeslot_types:
- group=random.choice(groups_for_session_purpose[purpose.slug])
- SessionFactory(
- meeting=meeting,
- purpose=purpose,
- type_id=type_id,
- group=group,
- name=f'{self.DEMO_PREFIX} for {group.acronym}',
- status_id='schedw',
- add_to_schedule=False,
- )
-
- self.stdout.write(f'\nRooms and sessions created with "{self.DEMO_PREFIX}" as name prefix\n')
\ No newline at end of file
| {"golden_diff": "diff --git a/ietf/meeting/management/commands/session_purpose_demo.py b/ietf/meeting/management/commands/session_purpose_demo.py\ndeleted file mode 100644\n--- a/ietf/meeting/management/commands/session_purpose_demo.py\n+++ /dev/null\n@@ -1,91 +0,0 @@\n-import datetime\n-import random\n-\n-from django.core.management.base import BaseCommand, CommandError\n-\n-from ietf.group.models import Group\n-from ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory\n-from ietf.meeting.helpers import get_meeting\n-from ietf.meeting.models import Room, Session\n-from ietf.name.models import SessionPurposeName\n-\n-\n-class Command(BaseCommand):\n- help = 'Set up a demo of the session purpose updates'\n-\n- DEMO_PREFIX='PDemo' # used to identify things added by this command\n-\n- def add_arguments(self, parser):\n- parser.add_argument('--remove', action='store_true')\n-\n- def handle(self, *args, **options):\n- if options['remove']:\n- self.remove_demo()\n- else:\n- self.install_demo()\n-\n- def remove_demo(self):\n- self.stdout.write(f'Removing rooms with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n- Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n- self.stdout.write(f'Removing sessions with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n- Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n-\n- def install_demo(self):\n- # get meeting\n- try:\n- meeting = get_meeting(days=14) # matches how secr app finds meetings\n- except:\n- raise CommandError('No upcoming meeting to modify')\n-\n- # create rooms\n- self.stdout.write('Creating rooms...\\n')\n- rooms = [\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),\n- RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),\n- ]\n-\n- # get all the timeslot types used by a session purpose\n- type_ids = set()\n- for purpose in SessionPurposeName.objects.filter(used=True):\n- type_ids.update(purpose.timeslot_types)\n-\n- # set up timeslots\n- self.stdout.write('Creating timeslots...\\n')\n- for room in rooms:\n- for day in range(meeting.days):\n- date = meeting.get_meeting_date(day)\n- for n, type_id in enumerate(type_ids):\n- TimeSlotFactory(\n- type_id=type_id,\n- meeting=meeting,\n- location=room,\n- time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),\n- duration=datetime.timedelta(hours=1),\n- )\n-\n- # set up sessions\n- self.stdout.write('Creating sessions...')\n- groups_for_session_purpose = {\n- purpose.slug: list(\n- Group.objects.filter(\n- type__features__session_purposes__contains=f'\"{purpose.slug}\"',\n- state_id='active',\n- )\n- )\n- for purpose in SessionPurposeName.objects.filter(used=True)\n- }\n- for purpose in SessionPurposeName.objects.filter(used=True):\n- for type_id in purpose.timeslot_types:\n- group=random.choice(groups_for_session_purpose[purpose.slug])\n- SessionFactory(\n- meeting=meeting,\n- purpose=purpose,\n- type_id=type_id,\n- group=group,\n- name=f'{self.DEMO_PREFIX} for {group.acronym}',\n- status_id='schedw',\n- add_to_schedule=False,\n- )\n-\n- self.stdout.write(f'\\nRooms and sessions created with \"{self.DEMO_PREFIX}\" as name prefix\\n')\n\\ No newline at end of file\n", "issue": "session_purpose_demo management command no longer needed\n### Description\n\nWhen the session purpose project was in development, we added a management command `session_purpose_demo` to add a fake meeting that exercised the new features. 
Since the session purposes are now in active use, I think that management command can be pruned.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "import datetime\nimport random\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ietf.group.models import Group\nfrom ietf.meeting.factories import RoomFactory, TimeSlotFactory, SessionFactory\nfrom ietf.meeting.helpers import get_meeting\nfrom ietf.meeting.models import Room, Session\nfrom ietf.name.models import SessionPurposeName\n\n\nclass Command(BaseCommand):\n help = 'Set up a demo of the session purpose updates'\n\n DEMO_PREFIX='PDemo' # used to identify things added by this command\n\n def add_arguments(self, parser):\n parser.add_argument('--remove', action='store_true')\n\n def handle(self, *args, **options):\n if options['remove']:\n self.remove_demo()\n else:\n self.install_demo()\n\n def remove_demo(self):\n self.stdout.write(f'Removing rooms with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n Room.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n self.stdout.write(f'Removing sessions with \"{self.DEMO_PREFIX}\" name prefix...\\n')\n Session.objects.filter(name__startswith=self.DEMO_PREFIX).delete()\n\n def install_demo(self):\n # get meeting\n try:\n meeting = get_meeting(days=14) # matches how secr app finds meetings\n except:\n raise CommandError('No upcoming meeting to modify')\n\n # create rooms\n self.stdout.write('Creating rooms...\\n')\n rooms = [\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 1'),\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 2'),\n RoomFactory(meeting=meeting, name=f'{self.DEMO_PREFIX} 3'),\n ]\n\n # get all the timeslot types used by a session purpose\n type_ids = set()\n for purpose in SessionPurposeName.objects.filter(used=True):\n type_ids.update(purpose.timeslot_types)\n\n # set up timeslots\n self.stdout.write('Creating timeslots...\\n')\n for room in rooms:\n for day in range(meeting.days):\n date = meeting.get_meeting_date(day)\n for n, type_id in enumerate(type_ids):\n TimeSlotFactory(\n type_id=type_id,\n meeting=meeting,\n location=room,\n time=datetime.datetime.combine(date, datetime.time(10, 0, 0)) + datetime.timedelta(hours=n),\n duration=datetime.timedelta(hours=1),\n )\n\n # set up sessions\n self.stdout.write('Creating sessions...')\n groups_for_session_purpose = {\n purpose.slug: list(\n Group.objects.filter(\n type__features__session_purposes__contains=f'\"{purpose.slug}\"',\n state_id='active',\n )\n )\n for purpose in SessionPurposeName.objects.filter(used=True)\n }\n for purpose in SessionPurposeName.objects.filter(used=True):\n for type_id in purpose.timeslot_types:\n group=random.choice(groups_for_session_purpose[purpose.slug])\n SessionFactory(\n meeting=meeting,\n purpose=purpose,\n type_id=type_id,\n group=group,\n name=f'{self.DEMO_PREFIX} for {group.acronym}',\n status_id='schedw',\n add_to_schedule=False,\n )\n\n self.stdout.write(f'\\nRooms and sessions created with \"{self.DEMO_PREFIX}\" as name prefix\\n')", "path": "ietf/meeting/management/commands/session_purpose_demo.py"}]} | 1,568 | 890 |
gh_patches_debug_29684 | rasdani/github-patches | git_diff | secondmind-labs__trieste-194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pareto set: hypervolume
As a user, I want functionality available to calculate the hypervolume of a Pareto front given the cell bounds, so that I can easily find the hypervolume when defining multi-objective acquisition functionality
</issue>
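For context, the quantity requested above — the hypervolume dominated by a Pareto front relative to a reference point — can be sketched in a few lines for the two-objective minimisation case. This is a generic illustration with made-up values and names, not the cell-bound-based implementation that the trieste code below works towards:

```python
import numpy as np

def hypervolume_2d(front: np.ndarray, reference: np.ndarray) -> float:
    """Dominated hypervolume of a two-objective (minimisation) Pareto front.

    `front` is [N, 2] and assumed non-dominated; `reference` is [2] and must be
    at least as large as every front point in both objectives.
    """
    # Sort ascending in objective 0; objective 1 is then descending.
    front = front[np.argsort(front[:, 0])]
    hv = 0.0
    prev_f2 = reference[1]
    for f1, f2 in front:
        # Strip between prev_f2 and f2; dominated from this point's f1 out to the reference.
        hv += (reference[0] - f1) * (prev_f2 - f2)
        prev_f2 = f2
    return hv

front = np.array([[1.0, 5.0], [2.0, 3.0], [4.0, 1.0]])
print(hypervolume_2d(front, np.array([5.0, 6.0])))  # 4*1 + 3*2 + 1*2 = 12.0
```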
<code>
[start of trieste/utils/pareto.py]
1 # Copyright 2020 The Trieste Contributors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ This module contains functions and classes for Pareto based multi-objective optimization. """
15
16 from typing import Tuple
17
18 import tensorflow as tf
19 from typing_extensions import Final
20
21 from ..type import TensorType
22
23
24 def non_dominated(observations: TensorType) -> Tuple[TensorType, TensorType]:
25 """
26 Computes the non-dominated set for a set of data points.
27 if there are duplicate point(s) in the non-dominated set, this function will return
28 as it is without removing the duplicate.
29
30 :param observations: set of points with shape [N,D]
31 :return: tf.Tensor of the non-dominated set [P,D] and the degree of dominance [N],
32 P is the number of points in pareto front
33 dominances gives the number of dominating points for each data point
34
35
36 """
37 extended = tf.tile(observations[None], [len(observations), 1, 1])
38 swapped_ext = tf.transpose(extended, [1, 0, 2])
39 dominance = tf.math.count_nonzero(
40 tf.logical_and(
41 tf.reduce_all(extended <= swapped_ext, axis=2),
42 tf.reduce_any(extended < swapped_ext, axis=2),
43 ),
44 axis=1,
45 )
46
47 return tf.boolean_mask(observations, dominance == 0), dominance
48
49
50 class BoundedVolumes:
51 """
52 A :class:`BoundedVolumes` store the index of the Pareto front to form lower and upper
53 bounds of the pseudo cells decomposition.
54 """
55
56 def __init__(self, lower_idx: tf.Tensor, upper_idx: tf.Tensor):
57 """
58 Construct bounded volumes.
59
60 :param lower_idx: the lowerbounds index of the volumes
61 :param upper_idx: the upperbounds index of the volumes
62 """
63
64 tf.debugging.assert_shapes([(lower_idx, ["N", "D"]), (upper_idx, ["N", "D"])])
65 self.lower_idx: Final[TensorType] = lower_idx
66 self.upper_idx: Final[TensorType] = upper_idx
67
68
69 class Pareto:
70 """
71 A :class:`Pareto` Construct a Pareto set.
72 Stores a Pareto set and calculates the cell bounds covering the non-dominated region.
73 The latter is needed for certain multiobjective acquisition functions.
74 """
75
76 def __init__(self, observations: TensorType):
77 """
78 :param observations: The observations for all objectives, with shape [N, 2].
79 :raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.
80 """
81 tf.debugging.assert_shapes([(observations, [None, 2])])
82
83 pf, _ = non_dominated(observations)
84 self.front: Final[TensorType] = tf.gather_nd(pf, tf.argsort(pf[:, :1], axis=0))
85 self.bounds: Final[BoundedVolumes] = self._bounds_2d(self.front)
86
87 @staticmethod
88 def _bounds_2d(front: TensorType) -> BoundedVolumes:
89
90 # this assumes the Pareto set has been sorted in ascending order on the first
91 # objective, which implies the second objective is sorted in descending order
92 len_front, number_of_objectives = front.shape
93
94 pf_ext_idx = tf.concat(
95 [
96 tf.zeros([1, number_of_objectives], dtype=tf.int32),
97 tf.argsort(front, axis=0) + 1,
98 tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,
99 ],
100 axis=0,
101 )
102
103 range_ = tf.range(len_front + 1)[:, None]
104 lower = tf.concat([range_, tf.zeros_like(range_)], axis=-1)
105 upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)
106
107 return BoundedVolumes(lower, upper)
108
[end of trieste/utils/pareto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/trieste/utils/pareto.py b/trieste/utils/pareto.py
--- a/trieste/utils/pareto.py
+++ b/trieste/utils/pareto.py
@@ -105,3 +105,46 @@
upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)
return BoundedVolumes(lower, upper)
+
+ def hypervolume_indicator(self, reference: TensorType) -> TensorType:
+ """
+ Calculate the hypervolume indicator
+ The hypervolume indicator is the volume of the dominated region.
+
+ :param reference: a reference point to use, with shape [D].
+ Defines the upper bound of the hypervolume.
+ Should be equal or bigger than the anti-ideal point of the Pareto set.
+ For comparing results across runs, the same reference point must be used.
+ :return: hypervolume indicator
+ :raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
+ shape.
+ :raise `tf.errors.InvalidArgumentError`: If ``reference`` is less than the anti-ideal point
+ in any dimension.
+ """
+ tf.debugging.assert_greater_equal(reference, self.front)
+
+ tf.debugging.assert_shapes(
+ [
+ (self.bounds.lower_idx, ["N", "D"]),
+ (self.bounds.upper_idx, ["N", "D"]),
+ (self.front, ["M", "D"]),
+ (reference, ["D"]),
+ ]
+ )
+
+ min_pfront = tf.reduce_min(self.front, 0, keepdims=True)
+ pseudo_pfront = tf.concat((min_pfront, self.front, reference[None]), 0)
+ N, D = tf.shape(self.bounds.upper_idx)
+
+ idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])
+ upper_idx = tf.reshape(
+ tf.stack([tf.transpose(self.bounds.upper_idx), idx], axis=2), [N * D, 2]
+ )
+ lower_idx = tf.reshape(
+ tf.stack([tf.transpose(self.bounds.lower_idx), idx], axis=2), [N * D, 2]
+ )
+ upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [D, N])
+ lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [D, N])
+ hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 0))
+
+ return tf.reduce_prod(reference[None] - min_pfront) - hypervolume
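As a rough usage sketch of the method added in the diff above (the observation values and reference point are invented for illustration; the import path follows the file path shown earlier):

```python
import tensorflow as tf
from trieste.utils.pareto import Pareto

observations = tf.constant([[1.0, 5.0], [2.0, 3.0], [4.0, 1.0], [3.0, 4.0]])
pareto = Pareto(observations)            # keeps only the non-dominated points
reference = tf.constant([5.0, 6.0])      # must be >= every front point (see the assert)
print(pareto.hypervolume_indicator(reference))
```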
| {"golden_diff": "diff --git a/trieste/utils/pareto.py b/trieste/utils/pareto.py\n--- a/trieste/utils/pareto.py\n+++ b/trieste/utils/pareto.py\n@@ -105,3 +105,46 @@\n upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)\n \n return BoundedVolumes(lower, upper)\n+\n+ def hypervolume_indicator(self, reference: TensorType) -> TensorType:\n+ \"\"\"\n+ Calculate the hypervolume indicator\n+ The hypervolume indicator is the volume of the dominated region.\n+\n+ :param reference: a reference point to use, with shape [D].\n+ Defines the upper bound of the hypervolume.\n+ Should be equal or bigger than the anti-ideal point of the Pareto set.\n+ For comparing results across runs, the same reference point must be used.\n+ :return: hypervolume indicator\n+ :raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid\n+ shape.\n+ :raise `tf.errors.InvalidArgumentError`: If ``reference`` is less than the anti-ideal point\n+ in any dimension.\n+ \"\"\"\n+ tf.debugging.assert_greater_equal(reference, self.front)\n+\n+ tf.debugging.assert_shapes(\n+ [\n+ (self.bounds.lower_idx, [\"N\", \"D\"]),\n+ (self.bounds.upper_idx, [\"N\", \"D\"]),\n+ (self.front, [\"M\", \"D\"]),\n+ (reference, [\"D\"]),\n+ ]\n+ )\n+\n+ min_pfront = tf.reduce_min(self.front, 0, keepdims=True)\n+ pseudo_pfront = tf.concat((min_pfront, self.front, reference[None]), 0)\n+ N, D = tf.shape(self.bounds.upper_idx)\n+\n+ idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])\n+ upper_idx = tf.reshape(\n+ tf.stack([tf.transpose(self.bounds.upper_idx), idx], axis=2), [N * D, 2]\n+ )\n+ lower_idx = tf.reshape(\n+ tf.stack([tf.transpose(self.bounds.lower_idx), idx], axis=2), [N * D, 2]\n+ )\n+ upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [D, N])\n+ lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [D, N])\n+ hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 0))\n+\n+ return tf.reduce_prod(reference[None] - min_pfront) - hypervolume\n", "issue": "Pareto set: hypervolume\nAs a user, I want functionality available to calculate the hypervolume of a Pareto front given the cell bounds, so that I can easily find the hypervolume when defining multi-objective acquisition functionality\n", "before_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This module contains functions and classes for Pareto based multi-objective optimization. 
\"\"\"\n\nfrom typing import Tuple\n\nimport tensorflow as tf\nfrom typing_extensions import Final\n\nfrom ..type import TensorType\n\n\ndef non_dominated(observations: TensorType) -> Tuple[TensorType, TensorType]:\n \"\"\"\n Computes the non-dominated set for a set of data points.\n if there are duplicate point(s) in the non-dominated set, this function will return\n as it is without removing the duplicate.\n\n :param observations: set of points with shape [N,D]\n :return: tf.Tensor of the non-dominated set [P,D] and the degree of dominance [N],\n P is the number of points in pareto front\n dominances gives the number of dominating points for each data point\n\n\n \"\"\"\n extended = tf.tile(observations[None], [len(observations), 1, 1])\n swapped_ext = tf.transpose(extended, [1, 0, 2])\n dominance = tf.math.count_nonzero(\n tf.logical_and(\n tf.reduce_all(extended <= swapped_ext, axis=2),\n tf.reduce_any(extended < swapped_ext, axis=2),\n ),\n axis=1,\n )\n\n return tf.boolean_mask(observations, dominance == 0), dominance\n\n\nclass BoundedVolumes:\n \"\"\"\n A :class:`BoundedVolumes` store the index of the Pareto front to form lower and upper\n bounds of the pseudo cells decomposition.\n \"\"\"\n\n def __init__(self, lower_idx: tf.Tensor, upper_idx: tf.Tensor):\n \"\"\"\n Construct bounded volumes.\n\n :param lower_idx: the lowerbounds index of the volumes\n :param upper_idx: the upperbounds index of the volumes\n \"\"\"\n\n tf.debugging.assert_shapes([(lower_idx, [\"N\", \"D\"]), (upper_idx, [\"N\", \"D\"])])\n self.lower_idx: Final[TensorType] = lower_idx\n self.upper_idx: Final[TensorType] = upper_idx\n\n\nclass Pareto:\n \"\"\"\n A :class:`Pareto` Construct a Pareto set.\n Stores a Pareto set and calculates the cell bounds covering the non-dominated region.\n The latter is needed for certain multiobjective acquisition functions.\n \"\"\"\n\n def __init__(self, observations: TensorType):\n \"\"\"\n :param observations: The observations for all objectives, with shape [N, 2].\n :raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.\n \"\"\"\n tf.debugging.assert_shapes([(observations, [None, 2])])\n\n pf, _ = non_dominated(observations)\n self.front: Final[TensorType] = tf.gather_nd(pf, tf.argsort(pf[:, :1], axis=0))\n self.bounds: Final[BoundedVolumes] = self._bounds_2d(self.front)\n\n @staticmethod\n def _bounds_2d(front: TensorType) -> BoundedVolumes:\n\n # this assumes the Pareto set has been sorted in ascending order on the first\n # objective, which implies the second objective is sorted in descending order\n len_front, number_of_objectives = front.shape\n\n pf_ext_idx = tf.concat(\n [\n tf.zeros([1, number_of_objectives], dtype=tf.int32),\n tf.argsort(front, axis=0) + 1,\n tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,\n ],\n axis=0,\n )\n\n range_ = tf.range(len_front + 1)[:, None]\n lower = tf.concat([range_, tf.zeros_like(range_)], axis=-1)\n upper = tf.concat([range_ + 1, pf_ext_idx[::-1, 1:][: pf_ext_idx[-1, 0]]], axis=-1)\n\n return BoundedVolumes(lower, upper)\n", "path": "trieste/utils/pareto.py"}]} | 1,781 | 605 |
gh_patches_debug_20014 | rasdani/github-patches | git_diff | huggingface__optimum-1141 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unpin onnx version to allow using v1.14
### Feature request
Remove the version constraint on the onnx package to allow using onnx==1.14.
### Motivation
- The latest version of onnxruntime (v1.15 at the time of writing) supports onnx==1.14.
- onnx==1.14 introduces support for protobuf v4, which is also useful.
### Your contribution
Seems removing the pin from setup.py would be all that is needed as long as the existing tests provide enough validation.
</issue>
<code>
[start of setup.py]
1 import re
2
3 from setuptools import find_namespace_packages, setup
4
5
6 # Ensure we match the version set in src/optimum/version.py
7 try:
8 filepath = "optimum/version.py"
9 with open(filepath) as version_file:
10 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
11 except Exception as error:
12 assert False, "Error: Could not open '%s' due %s\n" % (filepath, error)
13
14
15 REQUIRED_PKGS = [
16 "coloredlogs",
17 "sympy",
18 "transformers[sentencepiece]>=4.26.0",
19 "torch>=1.9",
20 "packaging",
21 "numpy",
22 "huggingface_hub>=0.8.0",
23 "datasets",
24 ]
25
26 TESTS_REQUIRE = [
27 "pytest",
28 "requests",
29 "parameterized",
30 "pytest-xdist",
31 "Pillow",
32 "sacremoses",
33 "torchvision",
34 "diffusers>=0.17.0",
35 "torchaudio",
36 ]
37
38 QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241,<=0.0.259"]
39
40 BENCHMARK_REQUIRE = ["optuna", "tqdm", "scikit-learn", "seqeval", "torchvision", "evaluate>=0.2.0"]
41
42 EXTRAS_REQUIRE = {
43 "onnxruntime": [
44 "onnx<1.14.0",
45 "onnxruntime>=1.9.0",
46 "datasets>=1.2.1",
47 "evaluate",
48 "protobuf>=3.20.1",
49 ],
50 "onnxruntime-gpu": [
51 "onnx<1.14.0",
52 "onnxruntime-gpu>=1.9.0",
53 "datasets>=1.2.1",
54 "evaluate",
55 "protobuf>=3.20.1",
56 ],
57 "exporters": ["onnx<1.14.0", "onnxruntime", "timm"],
58 "exporters-gpu": ["onnx<1.14.0", "onnxruntime-gpu", "timm"],
59 "exporters-tf": ["tensorflow>=2.4,<2.11", "tf2onnx", "onnx", "onnxruntime", "timm", "h5py", "numpy<1.24.0"],
60 "intel": "optimum-intel",
61 "openvino": "optimum-intel[openvino]",
62 "nncf": "optimum-intel[nncf]",
63 "neural-compressor": "optimum-intel[neural-compressor]",
64 "graphcore": "optimum-graphcore",
65 "habana": ["transformers<4.29.0", "optimum-habana"],
66 "neuron": "optimum-neuron[neuron]",
67 "neuronx": "optimum-neuron[neuronx]",
68 "dev": TESTS_REQUIRE + QUALITY_REQUIRE,
69 "tests": TESTS_REQUIRE,
70 "quality": QUALITY_REQUIRE,
71 "benchmark": BENCHMARK_REQUIRE,
72 }
73
74 setup(
75 name="optimum",
76 version=__version__,
77 description="Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to "
78 "integrate third-party libraries from Hardware Partners and interface with their specific "
79 "functionality.",
80 long_description=open("README.md", "r", encoding="utf-8").read(),
81 long_description_content_type="text/markdown",
82 classifiers=[
83 "Development Status :: 5 - Production/Stable",
84 "License :: OSI Approved :: Apache Software License",
85 "Intended Audience :: Developers",
86 "Intended Audience :: Education",
87 "Intended Audience :: Science/Research",
88 "Operating System :: OS Independent",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 "Programming Language :: Python :: 3.9",
92 "Topic :: Scientific/Engineering :: Artificial Intelligence",
93 ],
94 keywords="transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, "
95 "habana, graphcore, neural compressor, ipu, hpu",
96 url="https://github.com/huggingface/optimum",
97 author="HuggingFace Inc. Special Ops Team",
98 author_email="[email protected]",
99 license="Apache",
100 packages=find_namespace_packages(include=["optimum*"]),
101 install_requires=REQUIRED_PKGS,
102 extras_require=EXTRAS_REQUIRE,
103 python_requires=">=3.7.0",
104 include_package_data=True,
105 zip_safe=False,
106 entry_points={"console_scripts": ["optimum-cli=optimum.commands.optimum_cli:main"]},
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,21 +41,21 @@
EXTRAS_REQUIRE = {
"onnxruntime": [
- "onnx<1.14.0",
+ "onnx",
"onnxruntime>=1.9.0",
"datasets>=1.2.1",
"evaluate",
"protobuf>=3.20.1",
],
"onnxruntime-gpu": [
- "onnx<1.14.0",
+ "onnx",
"onnxruntime-gpu>=1.9.0",
"datasets>=1.2.1",
"evaluate",
"protobuf>=3.20.1",
],
- "exporters": ["onnx<1.14.0", "onnxruntime", "timm"],
- "exporters-gpu": ["onnx<1.14.0", "onnxruntime-gpu", "timm"],
+ "exporters": ["onnx", "onnxruntime", "timm"],
+ "exporters-gpu": ["onnx", "onnxruntime-gpu", "timm"],
"exporters-tf": ["tensorflow>=2.4,<2.11", "tf2onnx", "onnx", "onnxruntime", "timm", "h5py", "numpy<1.24.0"],
"intel": "optimum-intel",
"openvino": "optimum-intel[openvino]",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,21 +41,21 @@\n \n EXTRAS_REQUIRE = {\n \"onnxruntime\": [\n- \"onnx<1.14.0\",\n+ \"onnx\",\n \"onnxruntime>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"onnxruntime-gpu\": [\n- \"onnx<1.14.0\",\n+ \"onnx\",\n \"onnxruntime-gpu>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n- \"exporters\": [\"onnx<1.14.0\", \"onnxruntime\", \"timm\"],\n- \"exporters-gpu\": [\"onnx<1.14.0\", \"onnxruntime-gpu\", \"timm\"],\n+ \"exporters\": [\"onnx\", \"onnxruntime\", \"timm\"],\n+ \"exporters-gpu\": [\"onnx\", \"onnxruntime-gpu\", \"timm\"],\n \"exporters-tf\": [\"tensorflow>=2.4,<2.11\", \"tf2onnx\", \"onnx\", \"onnxruntime\", \"timm\", \"h5py\", \"numpy<1.24.0\"],\n \"intel\": \"optimum-intel\",\n \"openvino\": \"optimum-intel[openvino]\",\n", "issue": "Unpin onnx version to allow using v1.14\n### Feature request\r\n\r\nRemove the version constraint on onnx package to allow using onnx==1.14.\r\n\r\n### Motivation\r\n\r\n- The latest version of onnxruntime (v1.15 at the time of writing) supports onnx==1.14.\r\n- onnx==1.14 introduces support for protobuf v4, which is also useful.\r\n\r\n### Your contribution\r\n\r\nSeems removing the pin from setup.py would be all that is needed as long as the existing tests provide enough validation.\n", "before_files": [{"content": "import re\n\nfrom setuptools import find_namespace_packages, setup\n\n\n# Ensure we match the version set in src/optimum/version.py\ntry:\n filepath = \"optimum/version.py\"\n with open(filepath) as version_file:\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\nexcept Exception as error:\n assert False, \"Error: Could not open '%s' due %s\\n\" % (filepath, error)\n\n\nREQUIRED_PKGS = [\n \"coloredlogs\",\n \"sympy\",\n \"transformers[sentencepiece]>=4.26.0\",\n \"torch>=1.9\",\n \"packaging\",\n \"numpy\",\n \"huggingface_hub>=0.8.0\",\n \"datasets\",\n]\n\nTESTS_REQUIRE = [\n \"pytest\",\n \"requests\",\n \"parameterized\",\n \"pytest-xdist\",\n \"Pillow\",\n \"sacremoses\",\n \"torchvision\",\n \"diffusers>=0.17.0\",\n \"torchaudio\",\n]\n\nQUALITY_REQUIRE = [\"black~=23.1\", \"ruff>=0.0.241,<=0.0.259\"]\n\nBENCHMARK_REQUIRE = [\"optuna\", \"tqdm\", \"scikit-learn\", \"seqeval\", \"torchvision\", \"evaluate>=0.2.0\"]\n\nEXTRAS_REQUIRE = {\n \"onnxruntime\": [\n \"onnx<1.14.0\",\n \"onnxruntime>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"onnxruntime-gpu\": [\n \"onnx<1.14.0\",\n \"onnxruntime-gpu>=1.9.0\",\n \"datasets>=1.2.1\",\n \"evaluate\",\n \"protobuf>=3.20.1\",\n ],\n \"exporters\": [\"onnx<1.14.0\", \"onnxruntime\", \"timm\"],\n \"exporters-gpu\": [\"onnx<1.14.0\", \"onnxruntime-gpu\", \"timm\"],\n \"exporters-tf\": [\"tensorflow>=2.4,<2.11\", \"tf2onnx\", \"onnx\", \"onnxruntime\", \"timm\", \"h5py\", \"numpy<1.24.0\"],\n \"intel\": \"optimum-intel\",\n \"openvino\": \"optimum-intel[openvino]\",\n \"nncf\": \"optimum-intel[nncf]\",\n \"neural-compressor\": \"optimum-intel[neural-compressor]\",\n \"graphcore\": \"optimum-graphcore\",\n \"habana\": [\"transformers<4.29.0\", \"optimum-habana\"],\n \"neuron\": \"optimum-neuron[neuron]\",\n \"neuronx\": \"optimum-neuron[neuronx]\",\n \"dev\": TESTS_REQUIRE + QUALITY_REQUIRE,\n \"tests\": TESTS_REQUIRE,\n \"quality\": QUALITY_REQUIRE,\n \"benchmark\": BENCHMARK_REQUIRE,\n}\n\nsetup(\n name=\"optimum\",\n version=__version__,\n description=\"Optimum Library is an 
extension of the Hugging Face Transformers library, providing a framework to \"\n \"integrate third-party libraries from Hardware Partners and interface with their specific \"\n \"functionality.\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, \"\n \"habana, graphcore, neural compressor, ipu, hpu\",\n url=\"https://github.com/huggingface/optimum\",\n author=\"HuggingFace Inc. Special Ops Team\",\n author_email=\"[email protected]\",\n license=\"Apache\",\n packages=find_namespace_packages(include=[\"optimum*\"]),\n install_requires=REQUIRED_PKGS,\n extras_require=EXTRAS_REQUIRE,\n python_requires=\">=3.7.0\",\n include_package_data=True,\n zip_safe=False,\n entry_points={\"console_scripts\": [\"optimum-cli=optimum.commands.optimum_cli:main\"]},\n)\n", "path": "setup.py"}]} | 1,909 | 353 |
gh_patches_debug_6189 | rasdani/github-patches | git_diff | facebookresearch__hydra-594 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fish completion is not working
Fish completion does not seem to work:
```
$ python my_app.py -sc install=fish | source
$ python my_app.py <TAB> string split: Unknown option “-n”
- (line 2):
string split -n ' ' $COMP_LINE
^
in command substitution
called on line 0 of file -
in function “hydra_fish_completion”
called on standard input
in command substitution
called on standard input
stringstring - manipulate strings
-
Synopsis
string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]
string join [(-q | --quiet)] SEP [STRING...]
string length [(-q | --quiet)] [STRING...]
string lower [(-q | --quiet)] [STRING...]
string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]
[(-n | --index)] [(-q |
```
Fish completion is not working
Fish completion does not seem to work:
```
$ python my_app.py -sc install=fish | source
$ python my_app.py <TAB> string split: Unknown option “-n”
- (line 2):
string split -n ' ' $COMP_LINE
^
in command substitution
called on line 0 of file -
in function “hydra_fish_completion”
called on standard input
in command substitution
called on standard input
stringstring - manipulate strings
-
Synopsis
string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]
string join [(-q | --quiet)] SEP [STRING...]
string length [(-q | --quiet)] [STRING...]
string lower [(-q | --quiet)] [STRING...]
string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]
[(-n | --index)] [(-q |
```
</issue>
<code>
[start of hydra/_internal/core_plugins/fish_completion.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 import os
4 import sys
5 from typing import List, Optional, Tuple
6
7 from hydra.plugins.completion_plugin import CompletionPlugin
8
9 log = logging.getLogger(__name__)
10
11
12 class FishCompletion(CompletionPlugin):
13 def install(self) -> None:
14 script = """function hydra_fish_completion
15 # Hydra will access COMP_LINE to generate completion candidates
16 set -lx COMP_LINE (commandline -cp)
17
18 # Find out how to call the underlying script
19 set -l parts (string split -n ' ' $COMP_LINE)
20 if test "$parts[1]" = "python" -o "$parts[1]" = "python3"
21 set cmd "$parts[1] $parts[2]"
22 if not grep -q "@hydra.main" $parts[2]
23 return
24 end
25 else
26 set cmd "$parts[1]"
27 end
28
29 # Generate candidates
30 eval "$cmd -sc query=fish"
31 end
32 """
33 output = self._get_exec()
34 reg_cmd = []
35 for name, cond in output:
36 reg_cmd.append(
37 f"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\n"
38 )
39 print(script)
40 print("".join(reg_cmd))
41
42 def uninstall(self) -> None:
43 name = self._get_uninstall_exec()
44 print(f"complete -e -c {name}")
45 print("function hydra_fish_completion\nend")
46
47 @staticmethod
48 def provides() -> str:
49 return "fish"
50
51 def query(self, config_name: Optional[str]) -> None:
52 line = os.environ["COMP_LINE"]
53 line = self.strip_python_or_app_name(line)
54 print("\n".join(self._query(config_name=config_name, line=line)))
55
56 @staticmethod
57 def help(command: str) -> str:
58 assert command in ["install", "uninstall"]
59 return f"{{}} -sc {command}=fish | source"
60
61 @staticmethod
62 def _get_exec() -> List[Tuple[str, str]]:
63 # Running as an installed app (setuptools entry point)
64 output = []
65 # User scenario 1: python script.py
66 name = os.path.basename(sys.executable)
67 cond = f"-n '__fish_seen_subcommand_from {sys.argv[0]}' "
68 output.append((name, cond))
69
70 # User scenario 2: ./script.py or src/script.py or script.py
71 name = os.path.basename(sys.argv[0])
72 cond = ""
73 output.append((name, cond))
74
75 return output
76
77 @staticmethod
78 def _get_uninstall_exec() -> str:
79 name = os.path.basename(sys.argv[0])
80
81 return name
82
[end of hydra/_internal/core_plugins/fish_completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py
--- a/hydra/_internal/core_plugins/fish_completion.py
+++ b/hydra/_internal/core_plugins/fish_completion.py
@@ -16,7 +16,7 @@
set -lx COMP_LINE (commandline -cp)
# Find out how to call the underlying script
- set -l parts (string split -n ' ' $COMP_LINE)
+ set -l parts (commandline -cpo)
if test "$parts[1]" = "python" -o "$parts[1]" = "python3"
set cmd "$parts[1] $parts[2]"
if not grep -q "@hydra.main" $parts[2]
| {"golden_diff": "diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py\n--- a/hydra/_internal/core_plugins/fish_completion.py\n+++ b/hydra/_internal/core_plugins/fish_completion.py\n@@ -16,7 +16,7 @@\n set -lx COMP_LINE (commandline -cp)\n \n # Find out how to call the underlying script\n- set -l parts (string split -n ' ' $COMP_LINE)\n+ set -l parts (commandline -cpo)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n", "issue": "Fish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\nFish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\nimport sys\nfrom typing import List, Optional, Tuple\n\nfrom hydra.plugins.completion_plugin import CompletionPlugin\n\nlog = logging.getLogger(__name__)\n\n\nclass FishCompletion(CompletionPlugin):\n def install(self) -> None:\n script = \"\"\"function hydra_fish_completion\n # Hydra will access COMP_LINE to generate completion candidates\n set -lx COMP_LINE (commandline -cp)\n\n # Find out how to call the underlying script\n set -l parts (string split -n ' ' $COMP_LINE)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n return\n end\n else\n set cmd \"$parts[1]\"\n end\n\n # Generate candidates\n eval \"$cmd -sc query=fish\"\nend\n \"\"\"\n output = self._get_exec()\n reg_cmd = []\n for name, cond in output:\n reg_cmd.append(\n f\"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\\n\"\n )\n print(script)\n print(\"\".join(reg_cmd))\n\n def uninstall(self) -> None:\n name = self._get_uninstall_exec()\n print(f\"complete -e -c {name}\")\n print(\"function hydra_fish_completion\\nend\")\n\n @staticmethod\n def provides() -> str:\n return \"fish\"\n\n def query(self, config_name: Optional[str]) -> None:\n line = os.environ[\"COMP_LINE\"]\n line = self.strip_python_or_app_name(line)\n print(\"\\n\".join(self._query(config_name=config_name, line=line)))\n\n @staticmethod\n def help(command: str) -> str:\n assert command in [\"install\", \"uninstall\"]\n return f\"{{}} -sc {command}=fish | source\"\n\n @staticmethod\n def _get_exec() -> List[Tuple[str, str]]:\n # Running as an installed app (setuptools entry point)\n output = []\n # User scenario 1: python script.py\n name = os.path.basename(sys.executable)\n cond = f\"-n '__fish_seen_subcommand_from {sys.argv[0]}' \"\n output.append((name, cond))\n\n # User scenario 2: ./script.py or src/script.py or script.py\n name = os.path.basename(sys.argv[0])\n cond = \"\"\n output.append((name, cond))\n\n return output\n\n @staticmethod\n def _get_uninstall_exec() -> str:\n name = os.path.basename(sys.argv[0])\n\n return name\n", "path": "hydra/_internal/core_plugins/fish_completion.py"}]} | 1,789 | 176 |
gh_patches_debug_29530 | rasdani/github-patches | git_diff | Flexget__Flexget-1667 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding new column to `plugins` output
I'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ define a list of interfaces for themselves.
Shall I create a PR adding the column 'Interfaces' to the output?
I wanted to ask before I put the effort in.
And if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. ;)
</issue>
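To make the request concrete, a minimal sketch of what a summary row with the extra column could look like — `DummyPlugin` and its interface and phase names are placeholders, not real Flexget plugins:

```python
class DummyPlugin:
    name = 'my_plugin'
    interfaces = ['task', 'search']   # the issue notes plugins already define these

header = ['Keyword', 'Interfaces', 'Phases', 'Flags']
plugin = DummyPlugin()
row = [plugin.name, ', '.join(plugin.interfaces), 'input(100)', 'doc']
print(header)
print(row)
```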
<code>
[start of flexget/plugins/cli/plugins.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from colorclass.toggles import disable_all_colors
7 from flexget import options
8 from flexget.event import event
9 from flexget.plugin import get_plugins
10 from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize
11
12 log = logging.getLogger('plugins')
13
14
15 def plugins_summary(manager, options):
16 if options.table_type == 'porcelain':
17 disable_all_colors()
18 header = ['Keyword', 'Phases', 'Flags']
19 table_data = [header]
20 for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
21 if options.builtins and not plugin.builtin:
22 continue
23 flags = []
24 if plugin.instance.__doc__:
25 flags.append('doc')
26 if plugin.builtin:
27 flags.append('builtin')
28 if plugin.debug:
29 if not options.debug:
30 continue
31 flags.append('developers')
32 handlers = plugin.phase_handlers
33 roles = []
34 for phase in handlers:
35 priority = handlers[phase].priority
36 roles.append('{0}({1})'.format(phase, priority))
37
38 name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
39 table_data.append([name, ', '.join(roles), ', '.join(flags)])
40
41 try:
42 table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
43 console(table.output)
44 except TerminalTableError as e:
45 console('ERROR: %s' % str(e))
46 return
47 console(colorize('green', ' Built-in plugins'))
48
49
50 @event('options.register')
51 def register_parser_arguments():
52 parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',
53 parents=[table_parser])
54 parser.add_argument('--interface', help='Show plugins belonging to this interface')
55 parser.add_argument('--phase', help='Show plugins that act on this phase')
56 parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')
57
[end of flexget/plugins/cli/plugins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py
--- a/flexget/plugins/cli/plugins.py
+++ b/flexget/plugins/cli/plugins.py
@@ -15,11 +15,12 @@
def plugins_summary(manager, options):
if options.table_type == 'porcelain':
disable_all_colors()
- header = ['Keyword', 'Phases', 'Flags']
+ header = ['Keyword', 'Interfaces', 'Phases', 'Flags']
table_data = [header]
for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):
if options.builtins and not plugin.builtin:
continue
+
flags = []
if plugin.instance.__doc__:
flags.append('doc')
@@ -29,6 +30,7 @@
if not options.debug:
continue
flags.append('developers')
+
handlers = plugin.phase_handlers
roles = []
for phase in handlers:
@@ -36,7 +38,7 @@
roles.append('{0}({1})'.format(phase, priority))
name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name
- table_data.append([name, ', '.join(roles), ', '.join(flags)])
+ table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])
try:
table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
| {"golden_diff": "diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py\n--- a/flexget/plugins/cli/plugins.py\n+++ b/flexget/plugins/cli/plugins.py\n@@ -15,11 +15,12 @@\n def plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n- header = ['Keyword', 'Phases', 'Flags']\n+ header = ['Keyword', 'Interfaces', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n+\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n@@ -29,6 +30,7 @@\n if not options.debug:\n continue\n flags.append('developers')\n+\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n@@ -36,7 +38,7 @@\n roles.append('{0}({1})'.format(phase, priority))\n \n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n- table_data.append([name, ', '.join(roles), ', '.join(flags)])\n+ table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])\n \n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n", "issue": "Adding new column to `plugins` output\nI'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ defined a list of interfaces for themselves. \r\n\r\nShall I create a PR adding the column 'Interfaces' to the output?\r\nI wanted to ask before I put the effort in.\r\n\r\nAnd if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. 
;)\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom colorclass.toggles import disable_all_colors\nfrom flexget import options\nfrom flexget.event import event\nfrom flexget.plugin import get_plugins\nfrom flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize\n\nlog = logging.getLogger('plugins')\n\n\ndef plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n header = ['Keyword', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n if plugin.builtin:\n flags.append('builtin')\n if plugin.debug:\n if not options.debug:\n continue\n flags.append('developers')\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n priority = handlers[phase].priority\n roles.append('{0}({1})'.format(phase, priority))\n\n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n table_data.append([name, ', '.join(roles), ', '.join(flags)])\n\n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n console(table.output)\n except TerminalTableError as e:\n console('ERROR: %s' % str(e))\n return\n console(colorize('green', ' Built-in plugins'))\n\n\n@event('options.register')\ndef register_parser_arguments():\n parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',\n parents=[table_parser])\n parser.add_argument('--interface', help='Show plugins belonging to this interface')\n parser.add_argument('--phase', help='Show plugins that act on this phase')\n parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')\n", "path": "flexget/plugins/cli/plugins.py"}]} | 1,244 | 326 |
gh_patches_debug_5568 | rasdani/github-patches | git_diff | pyodide__pyodide-689 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo?
Wondering what "wv" means or if this should read "more complex things."
https://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79
</issue>
<code>
[start of pyodide_build/mkpkg.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import json
5 import os
6 import shutil
7 import urllib.request
8 from pathlib import Path
9
10 PACKAGES_ROOT = Path(__file__).parent.parent / 'packages'
11
12 SDIST_EXTENSIONS = []
13
14
15 def get_sdist_extensions():
16 if SDIST_EXTENSIONS:
17 return SDIST_EXTENSIONS
18
19 for format in shutil.get_unpack_formats():
20 for ext in format[1]:
21 SDIST_EXTENSIONS.append(ext)
22
23 return SDIST_EXTENSIONS
24
25
26 def get_sdist_url_entry(json_content):
27 sdist_extensions_tuple = tuple(get_sdist_extensions())
28
29 for entry in json_content['urls']:
30 if entry['filename'].endswith(sdist_extensions_tuple):
31 return entry
32
33 raise Exception('No sdist URL found for package %s (%s)' % (
34 json_content['info'].get('name'),
35 json_content['info'].get('package_url'),
36 ))
37
38
39 def make_package(package, version=None):
40 import yaml
41
42 version = ('/' + version) if version is not None else ''
43 url = f"https://pypi.org/pypi/{package}{version}/json"
44
45 with urllib.request.urlopen(url) as fd:
46 json_content = json.load(fd)
47
48 entry = get_sdist_url_entry(json_content)
49 download_url = entry['url']
50 sha256 = entry['digests']['sha256']
51 version = json_content['info']['version']
52
53 yaml_content = {
54 'package': {
55 'name': package,
56 'version': version
57 },
58 'source': {
59 'url': download_url,
60 'sha256': sha256
61 },
62 'test': {
63 'imports': [
64 package
65 ]
66 }
67 }
68
69 if not (PACKAGES_ROOT / package).is_dir():
70 os.makedirs(PACKAGES_ROOT / package)
71 with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:
72 yaml.dump(yaml_content, fd, default_flow_style=False)
73
74
75 def make_parser(parser):
76 parser.description = '''
77 Make a new pyodide package. Creates a simple template that will work
78 for most pure Python packages, but will have to be edited for more wv
79 complex things.'''.strip()
80 parser.add_argument(
81 'package', type=str, nargs=1,
82 help="The package name on PyPI")
83 parser.add_argument(
84 '--version', type=str, default=None,
85 help="Package version string, "
86 "e.g. v1.2.1 (defaults to latest stable release)")
87 return parser
88
89
90 def main(args):
91 package = args.package[0]
92 make_package(package, args.version)
93
94
95 if __name__ == '__main__':
96 parser = make_parser(argparse.ArgumentParser())
97 args = parser.parse_args()
98 main(args)
99
[end of pyodide_build/mkpkg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py
--- a/pyodide_build/mkpkg.py
+++ b/pyodide_build/mkpkg.py
@@ -75,7 +75,7 @@
def make_parser(parser):
parser.description = '''
Make a new pyodide package. Creates a simple template that will work
-for most pure Python packages, but will have to be edited for more wv
+for most pure Python packages, but will have to be edited for more
complex things.'''.strip()
parser.add_argument(
'package', type=str, nargs=1,
| {"golden_diff": "diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py\n--- a/pyodide_build/mkpkg.py\n+++ b/pyodide_build/mkpkg.py\n@@ -75,7 +75,7 @@\n def make_parser(parser):\n parser.description = '''\n Make a new pyodide package. Creates a simple template that will work\n-for most pure Python packages, but will have to be edited for more wv\n+for most pure Python packages, but will have to be edited for more\n complex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n", "issue": "Typo?\nWondering what \"wv\" means or if this should read \"more complex things.\"\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more wv\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}]} | 1,425 | 138 |
gh_patches_debug_30846 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1877 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tmobile_us is broken
During the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))
</issue>
<code>
[start of locations/spiders/tmobile_us.py]
1 # -*- coding: utf-8 -*-
2 import json
3 from urllib.parse import urlencode
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Monday': 'Mo',
11 'Tuesday': 'Tu',
12 'Wednesday': 'We',
13 'Thursday': 'Th',
14 'Friday': 'Fr',
15 'Saturday': 'Sa',
16 'Sunday': 'Su'}
17
18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
19
20
21 class TMobileUSSpider(scrapy.Spider):
22 name = "tmobile_us"
23 item_attributes = { 'brand': "T-Mobile" }
24 allowed_domains = ["www.t-mobile.com"]
25 download_delay = 0.2
26
27 def parse_hours(self, store_hours):
28 opening_hours = OpeningHours()
29 if store_hours is None:
30 return
31
32 for store_day in store_hours:
33 day = DAY_MAPPING[store_day.get("day")]
34 open_time = store_day.get("opens")
35 close_time = store_day.get("closes")
36 if open_time is None and close_time is None:
37 continue
38 opening_hours.add_range(day=day,
39 open_time=open_time,
40 close_time=close_time,
41 time_format='%H:%M'
42 )
43
44 return opening_hours.as_opening_hours()
45
46 def start_requests(self):
47 url = BASE_URL
48
49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
50
51 next(points) # Ignore the header
52 for point in points:
53 _, lat, lon = point.strip().split(',')
54
55 params = {
56 'latitude': '{}'.format(lat),
57 'longitude': '{}'.format(lon),
58 'count': '1000',
59 'radius': '25',
60 'ignoreLoadingBar': 'false'
61 }
62
63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse)
64
65 def parse(self, response):
66 data = json.loads(response.body_as_unicode())
67
68 for store in data:
69 properties = {
70 'name': store["name"],
71 'ref': store["id"],
72 'addr_full': store["location"]["address"]["streetAddress"],
73 'city': store["location"]["address"]["addressLocality"],
74 'state': store["location"]["address"]["addressRegion"],
75 'postcode': store["location"]["address"]["postalCode"],
76 'phone': store.get("telephone"),
77 'website': store.get("url") or response.url,
78 'lat': float(store["location"]["latitude"]),
79 'lon': float(store["location"]["longitude"]),
80 }
81
82 hours = self.parse_hours(store.get("hours", []))
83 if hours:
84 properties["opening_hours"] = hours
85
86 yield GeojsonPointItem(**properties)
87
[end of locations/spiders/tmobile_us.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py
--- a/locations/spiders/tmobile_us.py
+++ b/locations/spiders/tmobile_us.py
@@ -15,12 +15,12 @@
'Saturday': 'Sa',
'Sunday': 'Su'}
-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'
class TMobileUSSpider(scrapy.Spider):
name = "tmobile_us"
- item_attributes = { 'brand': "T-Mobile" }
+ item_attributes = {'brand': "T-Mobile"}
allowed_domains = ["www.t-mobile.com"]
download_delay = 0.2
@@ -67,7 +67,7 @@
for store in data:
properties = {
- 'name': store["name"],
+ 'name': store.get("name"),
'ref': store["id"],
'addr_full': store["location"]["address"]["streetAddress"],
'city': store["location"]["address"]["addressLocality"],
@@ -75,8 +75,8 @@
'postcode': store["location"]["address"]["postalCode"],
'phone': store.get("telephone"),
'website': store.get("url") or response.url,
- 'lat': float(store["location"]["latitude"]),
- 'lon': float(store["location"]["longitude"]),
+ 'lat': store["location"]["latitude"],
+ 'lon': store["location"]["longitude"],
}
hours = self.parse_hours(store.get("hours", []))
| {"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,12 +15,12 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n- item_attributes = { 'brand': \"T-Mobile\" }\n+ item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n \n@@ -67,7 +67,7 @@\n \n for store in data:\n properties = {\n- 'name': store[\"name\"],\n+ 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n@@ -75,8 +75,8 @@\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n- 'lat': float(store[\"location\"][\"latitude\"]),\n- 'lon': float(store[\"location\"][\"longitude\"]),\n+ 'lat': store[\"location\"][\"latitude\"],\n+ 'lon': store[\"location\"][\"longitude\"],\n }\n \n hours = self.parse_hours(store.get(\"hours\", []))\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in 
data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]} | 1,506 | 393 |
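
The patched spider only changes the endpoint version and stops coercing latitude/longitude to `float`. A small sketch of the request URL it now builds, using the `v2.1` endpoint from the diff; the coordinates are made-up values, not ones from the entry.

```python
# Mirrors how start_requests() assembles the store-lookup URL after the patch.
from urllib.parse import urlencode

BASE_URL = "https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?"
params = {
    "latitude": "40.7128",      # placeholder point
    "longitude": "-74.0060",
    "count": "1000",
    "radius": "25",
    "ignoreLoadingBar": "false",
}
url = BASE_URL + urlencode(params)  # handed to scrapy.http.Request in the spider
```
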
gh_patches_debug_44285 | rasdani/github-patches | git_diff | lisa-lab__pylearn2-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update default training algorithm to support multiple monitoring datasets
Just follow SGD or BGD for an example
</issue>
<code>
[start of pylearn2/training_algorithms/default.py]
1 """
2 .. todo::
3
4 WRITEME
5 """
6 from pylearn2.monitor import Monitor
7 from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
8 from pylearn2.utils import safe_zip
9 from pylearn2.utils.data_specs import DataSpecsMapping
10 import theano.tensor as T
11
12
13 class DefaultTrainingAlgorithm(TrainingAlgorithm):
14 """
15 .. todo::
16
17 WRITEME
18 """
19 def __init__(self, batch_size=None, batches_per_iter=1000,
20 monitoring_batches=-1, monitoring_dataset=None,
21 termination_criterion=None):
22 """
23 Parameters
24 ----------
25 batch_size : int
26 If batch_size is None, reverts to the `force_batch_size` field of \
27 the model
28 batches_per_iter : int
29 WRITEME
30 monitoring_batches : int
31 WRITEME
32 monitoring_dataset : WRITEME
33 termination_criterion : WRITEME
34 If specified, can cause the algorithm to terminate before \
35 `model.learn_batch` says to
36 """
37 self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
38 if monitoring_dataset is None:
39 assert monitoring_batches == -1
40 self.monitoring_dataset = monitoring_dataset
41 self.monitoring_batches = monitoring_batches
42 self.bSetup = False
43 self.termination_criterion = termination_criterion
44
45 def setup(self, model, dataset):
46 """
47 Allows the training algorithm to do some preliminary configuration
48 *before* we actually start training the model. The dataset is provided
49 in case other derived training algorithms need to modify model based on
50 the dataset.
51
52 Parameters
53 ----------
54 model : object
55 Python object representing the model to train loosely \
56 implementing the interface of models.model.Model.
57
58 dataset : pylearn2.datasets.dataset.Dataset
59 Dataset object used to draw training data
60 """
61 self.model = model
62
63 self.monitor = Monitor.get_monitor(model)
64
65 if self.monitoring_dataset is not None:
66 # Get the data specifications needed by the model
67 space, source = model.get_monitoring_data_specs()
68
69 # Create Theano variables for each of the individual components
70 # of that data. Usually, it will be X for inputs and Y for targets.
71 # First, we need to find these components, and put them in a tuple
72 mapping = DataSpecsMapping((space, source))
73 space_tuple = mapping.flatten(space, return_tuple=True)
74 source_tuple = mapping.flatten(source, return_tuple=True)
75 # Then, build a flat tuple of these Theano variables
76 ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
77 for (sp, src) in safe_zip(space_tuple, source_tuple))
78 # Finally, organize them back into a structure expected by the
79 # monitoring channels of the model
80 nested_ipt = mapping.nest(ipt)
81
82 self.monitor.add_dataset(dataset=self.monitoring_dataset,
83 mode="sequential",
84 batch_size=self.batch_size,
85 num_batches=self.monitoring_batches)
86
87 channels = model.get_monitoring_channels(nested_ipt)
88 if not isinstance(channels, dict):
89 raise TypeError("model.get_monitoring_channels must return a "
90 "dictionary, but it returned " + str(channels))
91 for name in channels:
92 J = channels[name]
93 if isinstance(J, tuple):
94 assert len(J) == 2
95 J, prereqs = J
96 else:
97 prereqs = None
98
99 self.monitor.add_channel(name=name,
100 ipt=nested_ipt,
101 val=J,
102 prereqs=prereqs,
103 data_specs=(space, source))
104 self.first = True
105 self.bSetup = True
106
107 def train(self, dataset):
108 """
109 .. todo::
110
111 WRITEME
112 """
113 assert self.bSetup
114 model = self.model
115 if self.batch_size is None:
116 batch_size = model.force_batch_size
117 else:
118 batch_size = self.batch_size
119 if hasattr(model, 'force_batch_size'):
120 assert (model.force_batch_size <= 0 or batch_size ==
121 model.force_batch_size)
122
123 for i in xrange(self.batches_per_iter):
124 # model.train_batch and self.train both return False when training
125 # should terminate.
126 learn_more = model.train_batch(dataset, batch_size)
127 model.monitor.report_batch(batch_size)
128 if not learn_more:
129 break
130
131 # Make sure we didn't exit training loop because Model.learn
132 # hasn't been updated to new interface yet.
133 if learn_more not in [True,False]:
134 msg = ('The learn method of model %s did not return a boolean ' +
135 'value. Please update your model accordingly.')
136 raise ValueError(msg % str(model))
137 self.learn_more = learn_more
138
139 def continue_learning(self, model):
140 """
141 .. todo::
142
143 WRITEME
144 """
145 if self.learn_more:
146 if self.termination_criterion is not None:
147 return self.termination_criterion.continue_learning(model)
148 return True
149 return False
150
[end of pylearn2/training_algorithms/default.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py
--- a/pylearn2/training_algorithms/default.py
+++ b/pylearn2/training_algorithms/default.py
@@ -29,7 +29,8 @@
WRITEME
monitoring_batches : int
WRITEME
- monitoring_dataset : WRITEME
+ monitoring_dataset: Dataset or dict
+ A Dataset or a dictionary mapping string dataset names to Datasets
termination_criterion : WRITEME
If specified, can cause the algorithm to terminate before \
`model.learn_batch` says to
@@ -37,7 +38,8 @@
self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
if monitoring_dataset is None:
assert monitoring_batches == -1
- self.monitoring_dataset = monitoring_dataset
+
+ self._set_monitoring_dataset(monitoring_dataset)
self.monitoring_batches = monitoring_batches
self.bSetup = False
self.termination_criterion = termination_criterion
@@ -74,33 +76,42 @@
source_tuple = mapping.flatten(source, return_tuple=True)
# Then, build a flat tuple of these Theano variables
ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
- for (sp, src) in safe_zip(space_tuple, source_tuple))
+ for (sp, src) in safe_zip(space_tuple, source_tuple))
# Finally, organize them back into a structure expected by the
# monitoring channels of the model
nested_ipt = mapping.nest(ipt)
- self.monitor.add_dataset(dataset=self.monitoring_dataset,
- mode="sequential",
- batch_size=self.batch_size,
- num_batches=self.monitoring_batches)
-
channels = model.get_monitoring_channels(nested_ipt)
if not isinstance(channels, dict):
raise TypeError("model.get_monitoring_channels must return a "
"dictionary, but it returned " + str(channels))
- for name in channels:
- J = channels[name]
- if isinstance(J, tuple):
- assert len(J) == 2
- J, prereqs = J
+
+ for dataset_name in self.monitoring_dataset:
+ if dataset_name == '':
+ prefix = ''
else:
- prereqs = None
+ prefix = dataset_name + '_'
+ monitoring_dataset = self.monitoring_dataset[dataset_name]
+
+ self.monitor.add_dataset(dataset=monitoring_dataset,
+ mode="sequential",
+ batch_size=self.batch_size)
+
+ for name in channels:
+ J = channels[name]
+ if isinstance(J, tuple):
+ assert len(J) == 2
+ J, prereqs = J
+ else:
+ prereqs = None
+
+ self.monitor.add_channel(name=prefix + name,
+ ipt=nested_ipt,
+ val=J,
+ dataset=monitoring_dataset,
+ prereqs=prereqs,
+ data_specs=(space, source))
- self.monitor.add_channel(name=name,
- ipt=nested_ipt,
- val=J,
- prereqs=prereqs,
- data_specs=(space, source))
self.first = True
self.bSetup = True
@@ -130,7 +141,7 @@
# Make sure we didn't exit training loop because Model.learn
# hasn't been updated to new interface yet.
- if learn_more not in [True,False]:
+ if learn_more not in [True, False]:
msg = ('The learn method of model %s did not return a boolean ' +
'value. Please update your model accordingly.')
raise ValueError(msg % str(model))
| {"golden_diff": "diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py\n--- a/pylearn2/training_algorithms/default.py\n+++ b/pylearn2/training_algorithms/default.py\n@@ -29,7 +29,8 @@\n WRITEME\n monitoring_batches : int\n WRITEME\n- monitoring_dataset : WRITEME\n+ monitoring_dataset: Dataset or dict\n+ A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n@@ -37,7 +38,8 @@\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n- self.monitoring_dataset = monitoring_dataset\n+\n+ self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n@@ -74,33 +76,42 @@\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n- for (sp, src) in safe_zip(space_tuple, source_tuple))\n+ for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n \n- self.monitor.add_dataset(dataset=self.monitoring_dataset,\n- mode=\"sequential\",\n- batch_size=self.batch_size,\n- num_batches=self.monitoring_batches)\n-\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n- for name in channels:\n- J = channels[name]\n- if isinstance(J, tuple):\n- assert len(J) == 2\n- J, prereqs = J\n+\n+ for dataset_name in self.monitoring_dataset:\n+ if dataset_name == '':\n+ prefix = ''\n else:\n- prereqs = None\n+ prefix = dataset_name + '_'\n+ monitoring_dataset = self.monitoring_dataset[dataset_name]\n+\n+ self.monitor.add_dataset(dataset=monitoring_dataset,\n+ mode=\"sequential\",\n+ batch_size=self.batch_size)\n+\n+ for name in channels:\n+ J = channels[name]\n+ if isinstance(J, tuple):\n+ assert len(J) == 2\n+ J, prereqs = J\n+ else:\n+ prereqs = None\n+\n+ self.monitor.add_channel(name=prefix + name,\n+ ipt=nested_ipt,\n+ val=J,\n+ dataset=monitoring_dataset,\n+ prereqs=prereqs,\n+ data_specs=(space, source))\n \n- self.monitor.add_channel(name=name,\n- ipt=nested_ipt,\n- val=J,\n- prereqs=prereqs,\n- data_specs=(space, source))\n self.first = True\n self.bSetup = True\n \n@@ -130,7 +141,7 @@\n \n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n- if learn_more not in [True,False]:\n+ if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n", "issue": "Update default training algorithm to support multiple monitoring datasets\nJust follow SGD or BGD for an example\n\n", "before_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset : WRITEME\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n self.monitoring_dataset = monitoring_dataset\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n self.monitor.add_dataset(dataset=self.monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size,\n num_batches=self.monitoring_batches)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=name,\n ipt=nested_ipt,\n val=J,\n prereqs=prereqs,\n data_specs=(space, source))\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True,False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}]} | 1,996 | 834 |
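
The diff above lets `DefaultTrainingAlgorithm` accept either a single monitoring dataset or a dict of named datasets, registering each channel once per dataset with the dataset name as a prefix. A hedged, construction-only sketch follows, assuming `DenseDesignMatrix` is available as in stock pylearn2 and that `_set_monitoring_dataset` is provided by the `TrainingAlgorithm` base class; the arrays and dataset names are placeholders.

```python
# Construction-only example; no model or training loop is set up here.
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.training_algorithms.default import DefaultTrainingAlgorithm

rng = np.random.RandomState(0)
train_set = DenseDesignMatrix(X=rng.randn(100, 10).astype("float32"))
valid_set = DenseDesignMatrix(X=rng.randn(50, 10).astype("float32"))

algorithm = DefaultTrainingAlgorithm(
    batch_size=20,
    monitoring_dataset={"train": train_set, "valid": valid_set},
)
# After setup(), channels are added once per dataset with "train_"/"valid_"
# prefixes; an empty-string key would leave channel names unprefixed.
```
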
gh_patches_debug_34098 | rasdani/github-patches | git_diff | airctic__icevision-646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow passing a Config object to the MMDetection models
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
In the current version, to update the loss_weight for example, we have to duplicate an existing config .py file, make the changes in the new file, and pass it to the model method; the latter expects a config file_path.
**Describe the solution you'd like**
I think it would be better to pass the MMDetection config object to the model method instead. By doing so, we could let users populate the config object and update any field they wish to change, without creating and updating external files, like this:
```
cfg = Config.fromfile(cfg_filepath)
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2
```
**Additional context**
There is a discussion in our Discord forum around this issue:
https://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644
</issue>
<code>
[start of icevision/models/mmdet/common/bbox/two_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
[end of icevision/models/mmdet/common/bbox/two_stage/model.py]
[start of icevision/models/mmdet/common/bbox/single_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
[end of icevision/models/mmdet/common/bbox/single_stage/model.py]
[start of icevision/models/mmdet/common/mask/two_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 cfg.model.roi_head.mask_head.num_classes = num_classes - 1
17 if weights_path is not None:
18 cfg.model.pretrained = None
19
20 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
21
22 if weights_path is not None:
23 load_checkpoint(_model, str(weights_path))
24
25 return _model
26
[end of icevision/models/mmdet/common/mask/two_stage/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py
--- a/icevision/models/mmdet/common/bbox/single_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py
--- a/icevision/models/mmdet/common/bbox/two_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py
--- a/icevision/models/mmdet/common/mask/two_stage/model.py
+++ b/icevision/models/mmdet/common/mask/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
cfg.model.roi_head.mask_head.num_classes = num_classes - 1
if weights_path is not None:
| {"golden_diff": "diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/single_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/two_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py\n--- a/icevision/models/mmdet/common/mask/two_stage/model.py\n+++ b/icevision/models/mmdet/common/mask/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n", "issue": "Allow passing a Config object to the MMDetection models\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nIn the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path\r\n\r\n\r\n**Describe the solution you'd like**\r\n I think it would be better to pass the MMDetection config object instead to the model method. 
By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this: \r\n\r\n```\r\ncfg = Config.fromfile(cfg_filepath)\r\ncfg.model.bbox_head.loss_cls.loss_weight = 0.8\r\ncfg.model.bbox_head.loss_bbox.loss_weight = 2\r\n```\r\n\r\n\r\n**Additional context**\r\nThere is a discussion in our Discord forum around this issue:\r\nhttps://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}]} | 1,527 | 717 |
gh_patches_debug_4636 | rasdani/github-patches | git_diff | enthought__chaco-885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImportError: cannot import name 'marker_trait' from 'chaco.api'
**Problem Description**
ImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo
**Reproduction Steps:**
```bash
python chaco/examples/tutorials/scipy2008/traits_example.py
```
**Expected behavior:**
test passes
**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]
</issue>
<code>
[start of examples/tutorials/scipy2008/traits_example.py]
1 from numpy import linspace, sin
2
3 from enable.api import ColorTrait
4 from chaco.api import ArrayPlotData, Plot, marker_trait
5 from enable.api import ComponentEditor
6 from traits.api import HasTraits, Instance, Int
7 from traitsui.api import Group, Item, View
8
9
10 class ScatterPlotTraits(HasTraits):
11
12 plot = Instance(Plot)
13 color = ColorTrait("blue")
14 marker = marker_trait
15 marker_size = Int(4)
16
17 traits_view = View(
18 Group(
19 Item("color", label="Color", style="custom"),
20 Item("marker", label="Marker"),
21 Item("marker_size", label="Size"),
22 Item("plot", editor=ComponentEditor(), show_label=False),
23 orientation="vertical",
24 ),
25 width=800,
26 height=600,
27 resizable=True,
28 title="Chaco Plot",
29 )
30
31 def __init__(self):
32 # Create the data and the PlotData object
33 x = linspace(-14, 14, 100)
34 y = sin(x) * x ** 3
35 plotdata = ArrayPlotData(x=x, y=y)
36 # Create a Plot and associate it with the PlotData
37 plot = Plot(plotdata)
38 # Create a line plot in the Plot
39 self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0]
40 self.plot = plot
41
42 def _color_changed(self):
43 self.renderer.color = self.color
44
45 def _marker_changed(self):
46 self.renderer.marker = self.marker
47
48 def _marker_size_changed(self):
49 self.renderer.marker_size = self.marker_size
50
51
52 # ===============================================================================
53 # demo object that is used by the demo.py application.
54 # ===============================================================================
55 demo = ScatterPlotTraits()
56
57 if __name__ == "__main__":
58 demo.configure_traits()
59
[end of examples/tutorials/scipy2008/traits_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py
--- a/examples/tutorials/scipy2008/traits_example.py
+++ b/examples/tutorials/scipy2008/traits_example.py
@@ -1,7 +1,7 @@
from numpy import linspace, sin
-from enable.api import ColorTrait
-from chaco.api import ArrayPlotData, Plot, marker_trait
+from enable.api import ColorTrait, marker_trait
+from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Group, Item, View
| {"golden_diff": "diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py\n--- a/examples/tutorials/scipy2008/traits_example.py\n+++ b/examples/tutorials/scipy2008/traits_example.py\n@@ -1,7 +1,7 @@\n from numpy import linspace, sin\n \n-from enable.api import ColorTrait\n-from chaco.api import ArrayPlotData, Plot, marker_trait\n+from enable.api import ColorTrait, marker_trait\n+from chaco.api import ArrayPlotData, Plot\n from enable.api import ComponentEditor\n from traits.api import HasTraits, Instance, Int\n from traitsui.api import Group, Item, View\n", "issue": "ImportError: cannot import name 'marker_trait' from 'chaco.api'\n**Problem Description**\nImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo\n\n**Reproduction Steps:**\n\n```python\npython chaco/examples/tutorials/scipy2008/traits_example.py\n```\n\n**Expected behavior:**\ntest past\n\n**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]\n\n", "before_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait\nfrom chaco.api import ArrayPlotData, Plot, marker_trait\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def __init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}]} | 1,170 | 155 |
gh_patches_debug_19957 | rasdani/github-patches | git_diff | huggingface__accelerate-43 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`
Currently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`; is this an intentional exclusion?
</issue>
<code>
[start of src/accelerate/optimizer.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import torch
16
17 from .state import AcceleratorState, DistributedType, is_tpu_available
18
19
20 if is_tpu_available():
21 import torch_xla.core.xla_model as xm
22
23
24 def move_to_device(state, device):
25 if isinstance(state, (list, tuple)):
26 return type(state)(move_to_device(t, device) for t in state)
27 elif isinstance(state, dict):
28 return type(state)({k: move_to_device(v, device) for k, v in state.items()})
29 elif isinstance(state, torch.Tensor):
30 return state.to(device)
31 return state
32
33
34 class AcceleratedOptimizer(torch.optim.Optimizer):
35 """
36 Internal wrapper around a torch optimizer.
37
38 Args:
39 optimizer (:obj:`torch.optim.optimizer.Optimizer`):
40 The optimizer to wrap.
41 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):
42 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
43 :obj:`optimizer` on the right device.
44 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):
45 The scaler to use in the step function if training with mixed precision.
46 """
47
48 def __init__(self, optimizer, device_placement=True, scaler=None):
49 self.optimizer = optimizer
50 self.scaler = scaler
51 self.state = AcceleratorState()
52
53 # Handle device placement
54 if device_placement:
55 state_dict = self.optimizer.state_dict()
56 if self.state.distributed_type == DistributedType.TPU:
57 xm.send_cpu_data_to_device(state_dict, self.state.device)
58 else:
59 state_dict = move_to_device(state_dict, self.state.device)
60 self.optimizer.load_state_dict(state_dict)
61
62 @property
63 def param_groups(self):
64 return self.optimizer.param_groups
65
66 @param_groups.setter
67 def param_groups(self, param_groups):
68 self.optimizer.param_groups = param_groups
69
70 @property
71 def defaults(self):
72 return self.optimizer.defaults
73
74 @defaults.setter
75 def defaults(self, defaults):
76 self.optimizer.defaults = defaults
77
78 def add_param_group(self, param_group):
79 self.optimizer.add_param_group(param_group)
80
81 def load_state_dict(self, state_dict):
82 if self.state.distributed_type == DistributedType.TPU and self.device_placement:
83 xm.send_cpu_data_to_device(state_dict, self.state.device)
84 self.optimizer.load_state_dict(state_dict)
85
86 def state_dict(self):
87 return self.optimizer.state_dict()
88
89 def zero_grad(self):
90 self.optimizer.zero_grad()
91
92 def step(self):
93 if self.state.distributed_type == DistributedType.TPU:
94 xm.optimizer_step(self.optimizer)
95 elif self.scaler is not None:
96 self.scaler.step(self.optimizer)
97 self.scaler.update()
98 else:
99 self.optimizer.step()
100
101 def _switch_parameters(self, parameters_map):
102 for param_group in self.optimizer.param_groups:
103 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
104
[end of src/accelerate/optimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -14,6 +14,8 @@
import torch
+from packaging import version
+
from .state import AcceleratorState, DistributedType, is_tpu_available
@@ -86,8 +88,18 @@
def state_dict(self):
return self.optimizer.state_dict()
- def zero_grad(self):
- self.optimizer.zero_grad()
+ def zero_grad(self, set_to_none=None):
+ if version.parse(torch.__version__) < version.parse("1.7.0"):
+ if set_to_none is not None:
+ raise ValueError(
+ "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
+ f"earlier versions (found version {torch.__version__})."
+ )
+ self.optimizer.zero_grad()
+ else:
+ if set_to_none is not None:
+ set_to_none = False
+ self.optimizer.zero_grad(set_to_none=set_to_none)
def step(self):
if self.state.distributed_type == DistributedType.TPU:
| {"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -14,6 +14,8 @@\n \n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_tpu_available\n \n \n@@ -86,8 +88,18 @@\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n \n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n", "issue": "AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`\nCurrently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`, is this an intentional exclusion?\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}]} | 1,571 | 281 |
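As an illustration of the version-gated `zero_grad` handling in the diff above, the following standalone sketch shows the same branching with a stub optimizer so it runs without installing torch (only `packaging` is needed). The stub class, the version strings, and the choice to default a missing `set_to_none` to `False` on newer versions are illustrative assumptions, not the library's exact code.

```python
from packaging import version


class _StubOptimizer:
    """Duck-typed stand-in so the sketch runs without installing torch."""

    def zero_grad(self, set_to_none=False):
        print(f"zero_grad called with set_to_none={set_to_none}")


def call_zero_grad(optimizer, torch_version, set_to_none=None):
    """Forward zero_grad(), rejecting `set_to_none` on PyTorch < 1.7.0."""
    if version.parse(torch_version) < version.parse("1.7.0"):
        if set_to_none is not None:
            raise ValueError(
                "`set_to_none` was added to Optimizer.zero_grad in PyTorch 1.7.0 "
                f"(found version {torch_version})."
            )
        optimizer.zero_grad()
    else:
        if set_to_none is None:
            set_to_none = False  # assumption: treat "not given" as False
        optimizer.zero_grad(set_to_none=set_to_none)


call_zero_grad(_StubOptimizer(), "1.8.0", set_to_none=True)  # forwarded as True
call_zero_grad(_StubOptimizer(), "1.6.0")                    # plain zero_grad(), no kwarg allowed
```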
gh_patches_debug_12535 | rasdani/github-patches | git_diff | conda__conda-6918 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecate 'conda help' in favor of --help CLI flag
With the install on Windows 10:
_platform : win-64 conda version : 4.3.30
conda is private : False conda-env version : 4.3.30
conda-build version : 3.0.22 python version : 3.6.3.final.0
requests version : 2.18.4 root environment : C:\Anaconda3 (writable)_
the command: `conda help install` (or other similar, say, conda help create, etc.) produces the message
`C:\Anaconda3\python.exe: can't open file 'C:\Anaconda3\Scripts\conda': [Errno 2] No such file or directory`
This syntax works under Linux (Ubuntu), but of course there is no plain "conda" file in the Windows Anaconda Scripts directory.
</issue>
<code>
[start of conda/cli/main_help.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 from __future__ import absolute_import, division, print_function, unicode_literals
7
8 import subprocess
9 import sys
10
11
12 def execute(args, parser):
13 if not args.command:
14 parser.print_help()
15 return
16 subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
17
[end of conda/cli/main_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py
--- a/conda/cli/main_help.py
+++ b/conda/cli/main_help.py
@@ -5,7 +5,6 @@
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function, unicode_literals
-import subprocess
import sys
@@ -13,4 +12,7 @@
if not args.command:
parser.print_help()
return
- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
+ print("ERROR: The 'conda help' command is deprecated.\n"
+ "Instead use 'conda %s --help'." % args.command,
+ file=sys.stderr)
+ return 1
| {"golden_diff": "diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py\n--- a/conda/cli/main_help.py\n+++ b/conda/cli/main_help.py\n@@ -5,7 +5,6 @@\n # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import subprocess\n import sys\n \n \n@@ -13,4 +12,7 @@\n if not args.command:\n parser.print_help()\n return\n- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n+ print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n+ \"Instead use 'conda %s --help'.\" % args.command,\n+ file=sys.stderr)\n+ return 1\n", "issue": "deprecate 'conda help' in favor of --help CLI flag\nWith the install on Windows 10:\r\n\r\n _platform : win-64 conda version : 4.3.30\r\n conda is private : False conda-env version : 4.3.30\r\n conda-build version : 3.0.22 python version : 3.6.3.final.0\r\n requests version : 2.18.4 root environment : C:\\Anaconda3 (writable)_\r\n\r\nthe command: `conda help install` (or other similar, say, conda help create, etc.) produces the message\r\n\r\n`C:\\Anaconda3\\python.exe: can't open file 'C:\\Anaconda3\\Scripts\\conda': [Errno 2] No such file or directory`\r\n\r\nThis syntax works under Linux (Ubuntu), but of course, there is no just \"conda\" file in Windows Anaconda Scripts.\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport subprocess\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n", "path": "conda/cli/main_help.py"}]} | 911 | 185 |
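A hypothetical, self-contained sketch of what the patched `conda help` behaviour looks like from the user's side: instead of re-spawning `Scripts\conda` (which does not exist on Windows), it prints a deprecation notice to stderr and returns a non-zero code. The plain-string `command` argument and the usage line stand in for the real argparse objects.

```python
import sys


def execute_help(command=None):
    """Sketch of the patched behaviour: no subprocess, just a pointer to --help."""
    if not command:
        print("usage: conda help [command]")  # stand-in for parser.print_help()
        return
    print("ERROR: The 'conda help' command is deprecated.\n"
          "Instead use 'conda %s --help'." % command,
          file=sys.stderr)
    return 1


execute_help("install")  # prints the deprecation notice to stderr and returns 1
```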
gh_patches_debug_24251 | rasdani/github-patches | git_diff | nvaccess__nvda-7114 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When the -c option is used when starting NVDA user configuration can not be saved
Steps to reproduce:
- Open a command prompt and cd to the nvda.exe directory.
- start nvda with `nvda.exe -c "C:\myTemp"
- open the nvda menu and select "save configuration"
An error is shown "Could not save configuration - probably read only file system".
Regression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.
</issue>
<code>
[start of source/fileUtils.py]
1 #fileUtils.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2017 NV Access Limited, Bram Duvigneau
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6 import os
7 import ctypes
8 from contextlib import contextmanager
9 from tempfile import NamedTemporaryFile
10 from logHandler import log
11
12 #: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.
13 MOVEFILE_REPLACE_EXISTING = 1
14
15 @contextmanager
16 def FaultTolerantFile(name):
17 '''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the
18 file `name' when the context manager scope ends and the the context manager __exit__ is called. This
19 means writing out the complete file can be performed with less concern of corrupting the original file
20 if the process is interrupted by windows shutting down.
21
22 Usage:
23 with FaultTolerantFile("myFile.txt") as f:
24 f.write("This is a test")
25
26 This creates a temporary file, and the writes actually happen on this temp file. At the end of the
27 `with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
28 '''
29 dirpath, filename = os.path.split(name)
30 with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
31 log.debug(f.name)
32 yield f
33 f.flush()
34 os.fsync(f)
35 f.close()
36 moveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)
37 if moveFileResult == 0:
38 raise ctypes.WinError()
39
[end of source/fileUtils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/fileUtils.py b/source/fileUtils.py
--- a/source/fileUtils.py
+++ b/source/fileUtils.py
@@ -18,6 +18,7 @@
file `name' when the context manager scope ends and the the context manager __exit__ is called. This
means writing out the complete file can be performed with less concern of corrupting the original file
if the process is interrupted by windows shutting down.
+ `name` must be unicode.
Usage:
with FaultTolerantFile("myFile.txt") as f:
@@ -26,6 +27,8 @@
This creates a temporary file, and the writes actually happen on this temp file. At the end of the
`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
'''
+ if not isinstance(name, unicode):
+ raise TypeError("name must be unicode")
dirpath, filename = os.path.split(name)
with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
log.debug(f.name)
| {"golden_diff": "diff --git a/source/fileUtils.py b/source/fileUtils.py\n--- a/source/fileUtils.py\n+++ b/source/fileUtils.py\n@@ -18,6 +18,7 @@\n \tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n \tmeans writing out the complete file can be performed with less concern of corrupting the original file\n \tif the process is interrupted by windows shutting down.\n+\t`name` must be unicode.\n \n \tUsage:\n \t\twith FaultTolerantFile(\"myFile.txt\") as f:\n@@ -26,6 +27,8 @@\n \tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n \t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n \t'''\n+\tif not isinstance(name, unicode):\n+\t\traise TypeError(\"name must be unicode\")\n \tdirpath, filename = os.path.split(name)\n \twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n \t\tlog.debug(f.name)\n", "issue": "When the -c option is used when starting NVDA user configuration can not be saved\nSteps to reproduce:\r\n\r\n- Open a command prompt and cd to the nvda.exe directory.\r\n- start nvda with `nvda.exe -c \"C:\\myTemp\"\r\n- open the nvda menu and select \"save configuration\"\r\n\r\nAn error is shown \"Could not save configuration - probably read only file system\".\r\n\r\nRegression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.\n", "before_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}]} | 1,121 | 254 |
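The root cause described above is a byte-string (ANSI) config path reaching code that assumes text. Below is a small Python 3 flavoured illustration of the guard the patch adds; the real code is Python 2 and tests for `unicode`, so the function name and wording here are an analogy, not NVDA's actual API.

```python
def ensure_text_path(name):
    """Reject byte-string (ANSI) paths early instead of failing later in MoveFileExW."""
    if not isinstance(name, str):
        raise TypeError(f"name must be a text (unicode) string, got {type(name).__name__}")
    return name


ensure_text_path("C:\\myTemp\\profile.ini")       # accepted: text path
try:
    ensure_text_path(b"C:\\myTemp\\profile.ini")  # bytes path, as produced via the -c option
except TypeError as exc:
    print(exc)
```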
gh_patches_debug_25184 | rasdani/github-patches | git_diff | getsentry__sentry-6688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search related events by event ID

Right now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.
We should also add a column for event ID here.
Refs #3147
</issue>
<code>
[start of src/sentry/api/endpoints/group_events.py]
1 from __future__ import absolute_import
2
3 import six
4
5 from sentry import tagstore
6 from sentry.api.base import DocSection, EnvironmentMixin
7 from sentry.api.bases import GroupEndpoint
8 from sentry.api.serializers import serialize
9 from sentry.api.paginator import DateTimePaginator
10 from sentry.models import Environment, Event, Group
11 from sentry.search.utils import parse_query
12 from sentry.utils.apidocs import scenario, attach_scenarios
13 from rest_framework.response import Response
14 from sentry.search.utils import InvalidQuery
15
16
17 @scenario('ListAvailableSamples')
18 def list_available_samples_scenario(runner):
19 group = Group.objects.filter(project=runner.default_project).first()
20 runner.request(method='GET', path='/issues/%s/events/' % group.id)
21
22
23 class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
24 doc_section = DocSection.EVENTS
25
26 @attach_scenarios([list_available_samples_scenario])
27 def get(self, request, group):
28 """
29 List an Issue's Events
30 ``````````````````````
31
32 This endpoint lists an issue's events.
33
34 :pparam string issue_id: the ID of the issue to retrieve.
35 :auth: required
36 """
37
38 events = Event.objects.filter(
39 group_id=group.id,
40 )
41
42 query = request.GET.get('query')
43 if query:
44 try:
45 query_kwargs = parse_query(group.project, query, request.user)
46 except InvalidQuery as exc:
47 return Response({'detail': six.text_type(exc)}, status=400)
48
49 if query_kwargs['query']:
50 events = events.filter(
51 message__icontains=query_kwargs['query'],
52 )
53
54 if query_kwargs['tags']:
55 try:
56 environment_id = self._get_environment_id_from_request(
57 request, group.project.organization_id)
58 except Environment.DoesNotExist:
59 event_ids = []
60 else:
61 event_ids = tagstore.get_group_event_ids(
62 group.project_id, group.id, environment_id, query_kwargs['tags'])
63
64 if event_ids:
65 events = events.filter(
66 id__in=event_ids,
67 )
68 else:
69 events = events.none()
70
71 return self.paginate(
72 request=request,
73 queryset=events,
74 order_by='-datetime',
75 on_results=lambda x: serialize(x, request.user),
76 paginator_cls=DateTimePaginator,
77 )
78
[end of src/sentry/api/endpoints/group_events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py
--- a/src/sentry/api/endpoints/group_events.py
+++ b/src/sentry/api/endpoints/group_events.py
@@ -12,6 +12,7 @@
from sentry.utils.apidocs import scenario, attach_scenarios
from rest_framework.response import Response
from sentry.search.utils import InvalidQuery
+from django.db.models import Q
@scenario('ListAvailableSamples')
@@ -40,6 +41,7 @@
)
query = request.GET.get('query')
+
if query:
try:
query_kwargs = parse_query(group.project, query, request.user)
@@ -47,9 +49,12 @@
return Response({'detail': six.text_type(exc)}, status=400)
if query_kwargs['query']:
- events = events.filter(
- message__icontains=query_kwargs['query'],
- )
+ q = Q(message__icontains=query_kwargs['query'])
+
+ if len(query) == 32:
+ q |= Q(event_id__exact=query_kwargs['query'])
+
+ events = events.filter(q)
if query_kwargs['tags']:
try:
| {"golden_diff": "diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py\n--- a/src/sentry/api/endpoints/group_events.py\n+++ b/src/sentry/api/endpoints/group_events.py\n@@ -12,6 +12,7 @@\n from sentry.utils.apidocs import scenario, attach_scenarios\n from rest_framework.response import Response\n from sentry.search.utils import InvalidQuery\n+from django.db.models import Q\n \n \n @scenario('ListAvailableSamples')\n@@ -40,6 +41,7 @@\n )\n \n query = request.GET.get('query')\n+\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n@@ -47,9 +49,12 @@\n return Response({'detail': six.text_type(exc)}, status=400)\n \n if query_kwargs['query']:\n- events = events.filter(\n- message__icontains=query_kwargs['query'],\n- )\n+ q = Q(message__icontains=query_kwargs['query'])\n+\n+ if len(query) == 32:\n+ q |= Q(event_id__exact=query_kwargs['query'])\n+\n+ events = events.filter(q)\n \n if query_kwargs['tags']:\n try:\n", "issue": "Search related events by event ID\n\r\n\r\n\r\nRight now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.\r\n\r\nWe should also add a column for event ID here.\r\n\r\nRefs #3147\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n events = events.filter(\n message__icontains=query_kwargs['query'],\n )\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n 
)\n", "path": "src/sentry/api/endpoints/group_events.py"}]} | 1,338 | 270 |
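A minimal sketch of the query-building idea in the diff above: always match on message text, and additionally match an exact event id when the query looks like one (32 characters). It assumes Django is importable; the length heuristic mirrors the hunk, and a stricter version might also validate that the characters are hexadecimal.

```python
from django.db.models import Q


def build_event_filter(query):
    """Match on message text, plus exact event id when the query looks like one."""
    q = Q(message__icontains=query)
    if len(query) == 32:  # event ids are 32-character hex strings
        q |= Q(event_id__exact=query)
    return q


print(build_event_filter("connection reset"))
print(build_event_filter("d7354a6a97e64bcd8352b3a5b33d0771"))
# usage would then be: events = events.filter(build_event_filter(query))
```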
gh_patches_debug_43222 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add a select for "subject areas"
**Describe the feature you'd like and what it will do**
Currently, subject areas is free text.
Should be a typeahead -- noninvasive.
@martinseul suggested that we should populate a list similar to
https://has.arizona.edu/research-focus-areas
Related to https://github.com/hydroshare/hydroshare/issues/4733
</issue>
<code>
[start of theme/migrations/0023_alter_userprofile_subject_areas.py]
1 # Generated by Django 3.2.15 on 2023-04-11 18:41
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 def migrate_csv_subject_areas(apps, schema_editor):
8 SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
9 UserProfile = apps.get_model('theme.UserProfile')
10 # Attempt to match existing SAs from profiles
11 profiles_with_sa = UserProfile.objects \
12 .exclude(subject_areas__isnull=True) \
13 .exclude(subject_areas='')
14
15 subject_area_objects = SubjectArea.objects.all()
16
17 for profile in profiles_with_sa:
18 old_subject_areas = profile.subject_areas.split(',')
19 old_subject_areas = [s for s in old_subject_areas]
20 print('*' * 100)
21 print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
22 new_subj_areas = []
23 for subject in old_subject_areas:
24 print(f"Searching for a match with '{subject}'")
25 match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
26 if match:
27 new_subj_areas.append(match[0].name)
28 if match[0].name == subject:
29 print(f'- Exact match with pre-existing subject area: {subject}')
30 else:
31 print(f'- Near match with pre-existing subject area: {subject}')
32 else:
33 if subject.strip() == subject:
34 print(f"- Unmatched subject area '{subject}' will remain unaltered")
35 new_subj_areas.append(subject)
36 else:
37 print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
38 new_subj_areas.append(subject.strip())
39
40 sas = ','.join(new_subj_areas)
41 print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
42 profile.subject_areas = f'{{{sas}}}'
43 profile.save()
44
45 profiles_without_sa = UserProfile.objects \
46 .filter(subject_areas='')
47 for profile in profiles_without_sa:
48 print(f'Updating {profile} from "" to {{}}')
49 profile.subject_areas = '{}'
50 profile.save()
51
52
53 class Migration(migrations.Migration):
54
55 dependencies = [
56 ('theme', '0022_alter_userprofile_subject_areas'),
57 ]
58
59 operations = [
60 migrations.RunSQL("UPDATE theme_userprofile set subject_areas = NULL "
61 "where theme_userprofile.subject_areas like '';"),
62 migrations.RunPython(migrate_csv_subject_areas),
63 migrations.AlterField(
64 model_name='userprofile',
65 name='subject_areas',
66 field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. "Water Management." Free text entry or select from the suggestions', null=True, size=None),
67 ),
68 ]
69
[end of theme/migrations/0023_alter_userprofile_subject_areas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py
--- a/theme/migrations/0023_alter_userprofile_subject_areas.py
+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py
@@ -2,12 +2,25 @@
import django.contrib.postgres.fields
from django.db import migrations, models
+from django.db.utils import DataError
+from django.core.management import call_command
+import re
def migrate_csv_subject_areas(apps, schema_editor):
+ def strip_for_dict(string=""):
+ # Check if the string consists of only spaces and braces
+ res, _ = re.subn('{|}', '', string)
+ if res.strip() == "":
+ return ""
+ # replace invalid braces and quotes
+ string = string.replace("{", "[").replace("}", "]").replace("\"", "\'")
+ return string.strip()
+ call_command('create_subject_areas_dict')
SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
UserProfile = apps.get_model('theme.UserProfile')
# Attempt to match existing SAs from profiles
+ errors = []
profiles_with_sa = UserProfile.objects \
.exclude(subject_areas__isnull=True) \
.exclude(subject_areas='')
@@ -21,8 +34,16 @@
print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
new_subj_areas = []
for subject in old_subject_areas:
+ if subject == '':
+ # There is a trailing comma that we need to remove
+ continue
+ stripped_subject = strip_for_dict(subject)
+ if stripped_subject == '':
+ # The subject contained only invalid chars
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ continue
print(f"Searching for a match with '{subject}'")
- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]
if match:
new_subj_areas.append(match[0].name)
if match[0].name == subject:
@@ -30,24 +51,33 @@
else:
print(f'- Near match with pre-existing subject area: {subject}')
else:
- if subject.strip() == subject:
+ if stripped_subject == subject:
print(f"- Unmatched subject area '{subject}' will remain unaltered")
new_subj_areas.append(subject)
else:
- print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
- new_subj_areas.append(subject.strip())
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ new_subj_areas.append(stripped_subject)
sas = ','.join(new_subj_areas)
- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'
+ print(message)
profile.subject_areas = f'{{{sas}}}'
+ try:
+ profile.save()
+ except DataError as e:
+ errors.append(f'Error saving profile: {e}' + message)
+
+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')
+ for profile in profiles_without_sa:
+ print(f'Updating {profile} from "" to {{}}')
+ profile.subject_areas = '{}'
profile.save()
- profiles_without_sa = UserProfile.objects \
- .filter(subject_areas='')
- for profile in profiles_without_sa:
- print(f'Updating {profile} from "" to {{}}')
- profile.subject_areas = '{}'
- profile.save()
+ print("Done updating Subject Areas.")
+ if errors:
+ print("Errors during update:")
+ for error in errors:
+ print(error)
class Migration(migrations.Migration):
| {"golden_diff": "diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py\n--- a/theme/migrations/0023_alter_userprofile_subject_areas.py\n+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py\n@@ -2,12 +2,25 @@\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+from django.db.utils import DataError\n+from django.core.management import call_command\n+import re\n \n \n def migrate_csv_subject_areas(apps, schema_editor):\n+ def strip_for_dict(string=\"\"):\n+ # Check if the string consists of only spaces and braces\n+ res, _ = re.subn('{|}', '', string)\n+ if res.strip() == \"\":\n+ return \"\"\n+ # replace invalid braces and quotes\n+ string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n+ return string.strip()\n+ call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n+ errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n@@ -21,8 +34,16 @@\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n+ if subject == '':\n+ # There is a trailing comma that we need to remove\n+ continue\n+ stripped_subject = strip_for_dict(subject)\n+ if stripped_subject == '':\n+ # The subject contained only invalid chars\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ continue\n print(f\"Searching for a match with '{subject}'\")\n- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n@@ -30,24 +51,33 @@\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n- if subject.strip() == subject:\n+ if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n- print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n- new_subj_areas.append(subject.strip())\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ new_subj_areas.append(stripped_subject)\n \n sas = ','.join(new_subj_areas)\n- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n+ print(message)\n profile.subject_areas = f'{{{sas}}}'\n+ try:\n+ profile.save()\n+ except DataError as e:\n+ errors.append(f'Error saving profile: {e}' + message)\n+\n+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n+ for profile in profiles_without_sa:\n+ print(f'Updating {profile} from \"\" to {{}}')\n+ profile.subject_areas = '{}'\n profile.save()\n \n- profiles_without_sa = UserProfile.objects \\\n- .filter(subject_areas='')\n- for profile in profiles_without_sa:\n- print(f'Updating {profile} from \"\" to {{}}')\n- profile.subject_areas = '{}'\n- profile.save()\n+ print(\"Done updating Subject Areas.\")\n+ if errors:\n+ print(\"Errors during update:\")\n+ for error in errors:\n+ print(error)\n \n \n class 
Migration(migrations.Migration):\n", "issue": "add a select for \"subject areas\"\n**Describe the feature you'd like and what it will do**\r\nCurrently, subject areas is free text.\r\nShould be a typeahead -- noninvasive.\r\n@martinseul suggested that we should populate a list similar to\r\nhttps://has.arizona.edu/research-focus-areas\r\n\r\n\r\nRelated to https://github.com/hydroshare/hydroshare/issues/4733\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if subject.strip() == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n new_subj_areas.append(subject.strip())\n\n sas = ','.join(new_subj_areas)\n print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n profile.subject_areas = f'{{{sas}}}'\n profile.save()\n\n profiles_without_sa = UserProfile.objects \\\n .filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. \"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}]} | 1,394 | 898 |
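The migration's `strip_for_dict` helper is the part most likely to need a second look, since it decides which legacy free-text entries survive. Here is a standalone re-creation with a few sample inputs, using only the standard library; the sample subject areas are invented for demonstration.

```python
import re


def strip_for_dict(string=""):
    """Drop entries that are only braces/whitespace, and neutralise characters
    that would break a Postgres array literal (braces and double quotes)."""
    res, _ = re.subn(r"{|}", "", string)
    if res.strip() == "":
        return ""
    return string.replace("{", "[").replace("}", "]").replace('"', "'").strip()


for raw in [" Water Management ", "{Hydrology}", "   ", 'say "GIS"']:
    print(repr(raw), "->", repr(strip_for_dict(raw)))
```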
gh_patches_debug_14475 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2686 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ParserError exception raised for invalid configuration
### search you tried in the issue tracker
ParserError and unicode
### describe your issue
I executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:
````
$ pre-commit autoupdate
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
Check the log at /home/carsten/.cache/pre-commit/pre-commit.log
````
This is an expected error and I would expect an error message like `Your configuration file "..." is wrongly formatted at <pos>. Please review the format of the content.`
Thank you,
Carsten
### pre-commit --version
pre-commit 2.21.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-executables-have-shebangs
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.2
hooks:
- id: shellcheck
```
### ~/.cache/pre-commit/pre-commit.log (if present)
### version information
```
pre-commit version: 2.21.0
git --version: git version 2.35.3
sys.version:
3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]
sys.executable: /home/carsten/virtualenv/bin/python3.10
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
```
Traceback (most recent call last):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py", line 73, in error_handler
yield
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py", line 355, in main
return autoupdate(
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py", line 154, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 47, in migrate_config
contents = _migrate_map(contents)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 16, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py", line 81, in load
return loader.get_single_data()
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "yaml/_yaml.pyx", line 673, in yaml._yaml.CParser.get_single_node
File "yaml/_yaml.pyx", line 687, in yaml._yaml.CParser._compose_document
File "yaml/_yaml.pyx", line 731, in yaml._yaml.CParser._compose_node
File "yaml/_yaml.pyx", line 847, in yaml._yaml.CParser._compose_mapping_node
File "yaml/_yaml.pyx", line 860, in yaml._yaml.CParser._parse_next_event
yaml.parser.ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
</issue>
<code>
[start of pre_commit/commands/migrate_config.py]
1 from __future__ import annotations
2
3 import re
4 import textwrap
5
6 import yaml
7
8 from pre_commit.yaml import yaml_load
9
10
11 def _is_header_line(line: str) -> bool:
12 return line.startswith(('#', '---')) or not line.strip()
13
14
15 def _migrate_map(contents: str) -> str:
16 if isinstance(yaml_load(contents), list):
17 # Find the first non-header line
18 lines = contents.splitlines(True)
19 i = 0
20 # Only loop on non empty configuration file
21 while i < len(lines) and _is_header_line(lines[i]):
22 i += 1
23
24 header = ''.join(lines[:i])
25 rest = ''.join(lines[i:])
26
27 # If they are using the "default" flow style of yaml, this operation
28 # will yield a valid configuration
29 try:
30 trial_contents = f'{header}repos:\n{rest}'
31 yaml_load(trial_contents)
32 contents = trial_contents
33 except yaml.YAMLError:
34 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}'
35
36 return contents
37
38
39 def _migrate_sha_to_rev(contents: str) -> str:
40 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
41
42
43 def migrate_config(config_file: str, quiet: bool = False) -> int:
44 with open(config_file) as f:
45 orig_contents = contents = f.read()
46
47 contents = _migrate_map(contents)
48 contents = _migrate_sha_to_rev(contents)
49
50 if contents != orig_contents:
51 with open(config_file, 'w') as f:
52 f.write(contents)
53
54 print('Configuration has been migrated.')
55 elif not quiet:
56 print('Configuration is already migrated.')
57 return 0
58
[end of pre_commit/commands/migrate_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -3,8 +3,10 @@
import re
import textwrap
+import cfgv
import yaml
+from pre_commit.clientlib import InvalidConfigError
from pre_commit.yaml import yaml_load
@@ -44,6 +46,13 @@
with open(config_file) as f:
orig_contents = contents = f.read()
+ with cfgv.reraise_as(InvalidConfigError):
+ with cfgv.validate_context(f'File {config_file}'):
+ try:
+ yaml_load(orig_contents)
+ except Exception as e:
+ raise cfgv.ValidationError(str(e))
+
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
| {"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -3,8 +3,10 @@\n import re\n import textwrap\n \n+import cfgv\n import yaml\n \n+from pre_commit.clientlib import InvalidConfigError\n from pre_commit.yaml import yaml_load\n \n \n@@ -44,6 +46,13 @@\n with open(config_file) as f:\n orig_contents = contents = f.read()\n \n+ with cfgv.reraise_as(InvalidConfigError):\n+ with cfgv.validate_context(f'File {config_file}'):\n+ try:\n+ yaml_load(orig_contents)\n+ except Exception as e:\n+ raise cfgv.ValidationError(str(e))\n+\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n", "issue": "ParserError exception raised for invalid configuration\n### search you tried in the issue tracker\r\n\r\nParserError and unicode\r\n\r\n### describe your issue\r\n\r\nI executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:\r\n\r\n````\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\nCheck the log at /home/carsten/.cache/pre-commit/pre-commit.log\r\n````\r\n\r\nThis is an expected error and I would expect an error message like `Your configuration file \"...\" is wrongly formatted at <pos>. Please review the format of the content.'.\r\n\r\nThank you,\r\nCarsten\r\n\r\n### pre-commit --version\r\n\r\npre-commit 2.21.0\r\n\r\n### .pre-commit-config.yaml\r\n\r\n```yaml\r\nrepos:\r\n - repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v4.4.0\r\n hooks:\r\n - id: check-executables-have-shebangs\r\n\r\n- repo: https://github.com/shellcheck-py/shellcheck-py\r\n rev: v0.9.0.2\r\n hooks:\r\n - id: shellcheck\r\n```\r\n\r\n\r\n### ~/.cache/pre-commit/pre-commit.log (if present)\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.21.0\r\ngit --version: git version 2.35.3\r\nsys.version:\r\n 3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]\r\nsys.executable: /home/carsten/virtualenv/bin/python3.10\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py\", line 73, in error_handler\r\n yield\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py\", line 355, in main\r\n return autoupdate(\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py\", line 154, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 47, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 16, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py\", line 81, in load\r\n return loader.get_single_data()\r\n File 
\"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"yaml/_yaml.pyx\", line 673, in yaml._yaml.CParser.get_single_node\r\n File \"yaml/_yaml.pyx\", line 687, in yaml._yaml.CParser._compose_document\r\n File \"yaml/_yaml.pyx\", line 731, in yaml._yaml.CParser._compose_node\r\n File \"yaml/_yaml.pyx\", line 847, in yaml._yaml.CParser._compose_mapping_node\r\n File \"yaml/_yaml.pyx\", line 860, in yaml._yaml.CParser._parse_next_event\r\nyaml.parser.ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]} | 1,983 | 200 |
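A simplified sketch of the behaviour the fix aims for: validate the YAML up front and turn a raw `ParserError` into a short, user-facing message. It uses plain PyYAML and a local exception class; the real patch routes this through `cfgv.validate_context`/`reraise_as` and pre-commit's own `InvalidConfigError`, so treat this as an approximation.

```python
import yaml


class InvalidConfigError(ValueError):
    pass


def load_config_or_explain(path, contents):
    """Turn a raw yaml ParserError into a short, user-facing message."""
    try:
        return yaml.safe_load(contents)
    except yaml.YAMLError as exc:
        raise InvalidConfigError(
            f"File {path} is not valid YAML; please review its formatting.\n{exc}"
        ) from exc


bad = (
    "repos:\n"
    "  - repo: https://github.com/pre-commit/pre-commit-hooks\n"
    "    rev: v4.4.0\n"
    "- repo: https://github.com/shellcheck-py/shellcheck-py\n"  # mis-indented, as in the report
    "    rev: v0.9.0.2\n"
)
try:
    load_config_or_explain(".pre-commit-config.yaml", bad)
except InvalidConfigError as exc:
    print(exc)
```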
gh_patches_debug_11999 | rasdani/github-patches | git_diff | chainer__chainer-249 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type check to LeakyReLU function
Related to #123
</issue>
<code>
[start of chainer/functions/leaky_relu.py]
1 from chainer import cuda
2 from chainer import function
3
4
5 def _kern():
6 return cuda.elementwise(
7 'float* y, const float* cond, const float* x, float slope',
8 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')
9
10
11 class LeakyReLU(function.Function):
12
13 """Leaky rectifier unit."""
14
15 def __init__(self, slope=0.2):
16 self.slope = slope
17
18 def forward_cpu(self, x):
19 y = x[0].copy()
20 y[x[0] < 0] *= self.slope
21 return y,
22
23 def forward_gpu(self, x):
24 y = cuda.empty_like(x[0])
25 _kern()(y, x[0], x[0], self.slope)
26 return y,
27
28 def backward_cpu(self, x, gy):
29 gx = gy[0].copy()
30 gx[x[0] < 0] *= self.slope
31 return gx,
32
33 def backward_gpu(self, x, gy):
34 gx = cuda.empty_like(x[0])
35 _kern()(gx, x[0], gy[0], self.slope)
36 return gx,
37
38
39 def leaky_relu(x, slope=0.2):
40 """Leaky Rectified Linear Unit function.
41
42 This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
43 is a configurable slope value.
44
45 Args:
46 x (~chainer.Variable): Input variable.
47 slope (float): Slope value :math:`a`.
48
49 Returns:
50 ~chainer.Variable: Output variable.
51
52 """
53 return LeakyReLU(slope)(x)
54
[end of chainer/functions/leaky_relu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py
--- a/chainer/functions/leaky_relu.py
+++ b/chainer/functions/leaky_relu.py
@@ -1,5 +1,8 @@
+import numpy
+
from chainer import cuda
from chainer import function
+from chainer.utils import type_check
def _kern():
@@ -15,6 +18,13 @@
def __init__(self, slope=0.2):
self.slope = slope
+ def check_type_forward(self, in_types):
+ type_check.expect(in_types.size() == 1)
+ x_type, = in_types
+ type_check.expect(
+ x_type.dtype == numpy.float32,
+ )
+
def forward_cpu(self, x):
y = x[0].copy()
y[x[0] < 0] *= self.slope
| {"golden_diff": "diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py\n--- a/chainer/functions/leaky_relu.py\n+++ b/chainer/functions/leaky_relu.py\n@@ -1,5 +1,8 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n+from chainer.utils import type_check\n \n \n def _kern():\n@@ -15,6 +18,13 @@\n def __init__(self, slope=0.2):\n self.slope = slope\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(in_types.size() == 1)\n+ x_type, = in_types\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ )\n+\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n", "issue": "Add type check to LeakyReLU function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}]} | 1,039 | 206 |
gh_patches_debug_1427 | rasdani/github-patches | git_diff | saleor__saleor-340 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move py.test config to tox.ini
Pytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.
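
(Editorial illustration, not part of the original issue.) pytest reads a `[pytest]` section from `tox.ini`, so the contents of `pytest.ini` can simply move there; the options shown are assumptions about what that file currently contains:

```ini
[pytest]
DJANGO_SETTINGS_MODULE = saleor.settings
```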
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 from setuptools import setup, find_packages
3 from setuptools.command.test import test as TestCommand
4 import os
5 import sys
6
7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')
8
9
10 class PyTest(TestCommand):
11 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
12
13 def initialize_options(self):
14 TestCommand.initialize_options(self)
15 self.pytest_args = []
16
17 def finalize_options(self):
18 TestCommand.finalize_options(self)
19 self.test_args = []
20 self.test_suite = True
21
22 def run_tests(self):
23 #import here, cause outside the eggs aren't loaded
24 import pytest
25 errno = pytest.main(self.pytest_args)
26 sys.exit(errno)
27
28
29 setup(
30 name='saleor',
31 author='Mirumee Software',
32 author_email='[email protected]',
33 description="A fork'n'play e-commerce in Django",
34 license='BSD',
35 version='0.1.0a0',
36 url='http://getsaleor.com/',
37 packages=find_packages(),
38 include_package_data=True,
39 install_requires=[
40 'Babel>=1.3,<1.4a0',
41 'BabelDjango>=0.2,<0.3a0',
42 'Django>=1.8',
43 'dj_database_url>=0.3.0',
44 'django-emailit>=0.2.2',
45 'django-materializecss-form==0.0.64',
46 'django-model-utils>=2.0.0,<2.1a0',
47 'django-mptt>=0.7.1',
48 'django-offsite-storage>=0.0.5',
49 'django-payments>=0.7.0,<0.8a0',
50 'django-prices>=0.4.0,<0.5a0',
51 'djangorestframework>=3.1,<3.2a0',
52 'django-selectable==0.8.0',
53 'django-versatileimagefield>=1.0.1,<1.1a0',
54 'fake-factory>=0.3.2',
55 'google-measurement-protocol>=0.1.2,<0.2a0',
56 'jsonfield>=1.0.3',
57 'Markdown>=2.4',
58 'prices>=0.5,<0.6a0',
59 'requests>=1.2.0',
60 'satchless>=1.1.2,<1.2a0',
61 'unidecode'
62 ],
63 extras_require={
64 'PaaS': [
65 'whitenoise==1.0.6',
66 'gunicorn==19.2.1',
67 'psycopg2==2.6']},
68 cmdclass={
69 'test': PyTest},
70 entry_points={
71 'console_scripts': ['saleor = saleor:manage']},
72 tests_require=[
73 'mock==1.0.1',
74 'purl>=0.4.1',
75 'pytest',
76 'pytest-django'])
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@
entry_points={
'console_scripts': ['saleor = saleor:manage']},
tests_require=[
- 'mock==1.0.1',
+ 'mock==1.3.0',
'purl>=0.4.1',
'pytest',
'pytest-django'])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,7 +70,7 @@\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n- 'mock==1.0.1',\n+ 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "issue": "Move py.test config to tox.ini\nPytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.\n\n", "before_files": [{"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.0.1',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}]} | 1,378 | 98 |
gh_patches_debug_2789 | rasdani/github-patches | git_diff | ivy-llc__ivy-18204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
meshgrid
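
(Editorial note: the issue as filed is only the single word above; it presumably asks for a `meshgrid` function in the Paddle frontend. A rough usage sketch follows — the call is hypothetical until the frontend exists, and the shapes assume `ij` indexing:)

```python
import ivy.functional.frontends.paddle as paddle

x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([4, 5])

# Hypothetical call mirroring paddle.meshgrid:
xx, yy = paddle.meshgrid(x, y)  # each output would have shape (3, 2)
```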
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/creation.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
75
76 @to_ivy_arrays_and_back
77 def empty_like(x, dtype=None, name=None):
78 return ivy.empty_like(x, dtype=dtype)
79
80
81 @with_unsupported_dtypes(
82 {
83 "2.5.0 and below": (
84 "uint8",
85 "int8",
86 "int16",
87 "float16",
88 "complex64",
89 "complex128",
90 "bool",
91 )
92 },
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def tril(x, diagonal=0, name=None):
97 return ivy.tril(x, k=diagonal)
98
99
100 @with_unsupported_dtypes(
101 {
102 "2.5.0 and below": (
103 "uint8",
104 "int8",
105 "int16",
106 "float16",
107 "complex64",
108 "complex128",
109 "bool",
110 )
111 },
112 "paddle",
113 )
114 @to_ivy_arrays_and_back
115 def triu(x, diagonal=0, name=None):
116 return ivy.triu(x, k=diagonal)
117
118
119 @with_supported_dtypes(
120 {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
121 )
122 @to_ivy_arrays_and_back
123 def diagflat(x, offset=0, name=None):
124 arr = ivy.diagflat(x, offset=offset)
125 return arr
126
[end of ivy/functional/frontends/paddle/tensor/creation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -123,3 +123,11 @@
def diagflat(x, offset=0, name=None):
arr = ivy.diagflat(x, offset=offset)
return arr
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def meshgrid(*args, **kwargs):
+ return ivy.meshgrid(*args, indexing="ij")
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -123,3 +123,11 @@\n def diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n+\r\n+\r\n+@with_supported_dtypes(\r\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n+)\r\n+@to_ivy_arrays_and_back\r\n+def meshgrid(*args, **kwargs):\r\n+ return ivy.meshgrid(*args, indexing=\"ij\")\n", "issue": "meshgrid\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return 
ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]} | 1,834 | 191 |
gh_patches_debug_2003 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-9604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release google-cloud-storage
Hi @tseaver, could you help cut a release for google-cloud-storage?
cc: @JesseLovelace
</issue>
<code>
[start of storage/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-storage"
24 description = "Google Cloud Storage API client library"
25 version = "1.21.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-auth >= 1.2.0",
33 "google-cloud-core >= 1.0.3, < 2.0dev",
34 "google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev",
35 ]
36 extras = {}
37
38
39 # Setup boilerplate below this line.
40
41 package_root = os.path.abspath(os.path.dirname(__file__))
42
43 readme_filename = os.path.join(package_root, "README.rst")
44 with io.open(readme_filename, encoding="utf-8") as readme_file:
45 readme = readme_file.read()
46
47 # Only include packages under the 'google' namespace. Do not include tests,
48 # benchmarks, etc.
49 packages = [
50 package for package in setuptools.find_packages() if package.startswith("google")
51 ]
52
53 # Determine which namespaces are needed.
54 namespaces = ["google"]
55 if "google.cloud" in packages:
56 namespaces.append("google.cloud")
57
58
59 setuptools.setup(
60 name=name,
61 version=version,
62 description=description,
63 long_description=readme,
64 author="Google LLC",
65 author_email="[email protected]",
66 license="Apache 2.0",
67 url="https://github.com/GoogleCloudPlatform/google-cloud-python",
68 classifiers=[
69 release_status,
70 "Intended Audience :: Developers",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Operating System :: OS Independent",
80 "Topic :: Internet",
81 ],
82 platforms="Posix; MacOS X; Windows",
83 packages=packages,
84 namespace_packages=namespaces,
85 install_requires=dependencies,
86 extras_require=extras,
87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
88 include_package_data=True,
89 zip_safe=False,
90 )
91
[end of storage/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/storage/setup.py b/storage/setup.py
--- a/storage/setup.py
+++ b/storage/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-storage"
description = "Google Cloud Storage API client library"
-version = "1.21.0"
+version = "1.22.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
| {"golden_diff": "diff --git a/storage/setup.py b/storage/setup.py\n--- a/storage/setup.py\n+++ b/storage/setup.py\n@@ -22,7 +22,7 @@\n \n name = \"google-cloud-storage\"\n description = \"Google Cloud Storage API client library\"\n-version = \"1.21.0\"\n+version = \"1.22.0\"\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Release google-cloud-storage \nHi @tseaver, could you help cut a release for google-cloud-storage?\r\n\r\ncc: @JesseLovelace \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.21.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}]} | 1,424 | 102 |
gh_patches_debug_318 | rasdani/github-patches | git_diff | TencentBlueKing__bk-user-164 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Department query API with `?lookup_field=name` returns 404 when the department name contains "."
**Describe the problem you encountered**
Please describe the problem concisely; the clearer the description, the more efficiently it can be resolved.
**Steps to reproduce**
1. Create a catalog whose name contains a dot, e.g. 广东省.深圳市 ("Guangdong Province.Shenzhen City")
2. Query it via the API: http:://{host:port}/api/v2/departments/广东省.深圳市/?lookup_field=name
The query result is a 404.
Describe how to reproduce the problem; if it is hard to put into words, screenshots or a video may help.
**Expected behavior**
The expected, correct behavior.
**Version**
- Provide the exact version number of the user management (bk-user) release
- Is this an enterprise edition issue?
**If this is a SaaS page issue, please provide the operating system and browser information**
 - OS: [e.g. iOS]
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]
**Additional information**
Any content you think may help resolve the problem.
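
(Editorial addition, not part of the original report.) A minimal sketch of the suspected cause, assuming the 404 comes from the department lookup pattern in `departments/urls.py`, whose character class does not allow dots:

```python
import re

# Same character class as PVAR_DEPARTMENT_ID uses in departments/urls.py:
lookup = re.compile(r"[\w\-]+")

print(bool(lookup.fullmatch("广东省深圳市")))   # True  -> the route matches
print(bool(lookup.fullmatch("广东省.深圳市")))  # False -> no route matches, Django answers 404
```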
</issue>
<code>
[start of src/api/bkuser_core/departments/urls.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 from bkuser_core.common.constants import LOOKUP_FIELD_NAME
12 from django.conf.urls import url
13
14 from . import views
15
16 PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
17
18 urlpatterns = [
19 url(
20 r"^api/v2/departments/$",
21 views.DepartmentViewSet.as_view(
22 {
23 "get": "list",
24 "post": "create",
25 }
26 ),
27 name="departments",
28 ),
29 url(
30 r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID,
31 views.DepartmentViewSet.as_view(
32 {
33 "get": "retrieve",
34 "post": "update",
35 "delete": "destroy",
36 "patch": "partial_update",
37 }
38 ),
39 name="departments.action",
40 ),
41 url(
42 r"^api/v2/departments/%s/restoration/$" % PVAR_DEPARTMENT_ID,
43 views.DepartmentViewSet.as_view(
44 {
45 "post": "restoration",
46 }
47 ),
48 name="departments.restoration",
49 ),
50 url(
51 r"^api/v2/departments/%s/ancestors/$" % PVAR_DEPARTMENT_ID,
52 views.DepartmentViewSet.as_view(
53 {
54 "get": "get_ancestor",
55 }
56 ),
57 name="departments.ancestors",
58 ),
59 url(
60 r"^api/v2/departments/%s/children/$" % PVAR_DEPARTMENT_ID,
61 views.DepartmentViewSet.as_view(
62 {
63 "get": "get_children",
64 }
65 ),
66 name="departments.children",
67 ),
68 url(
69 r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID,
70 views.DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}),
71 name="departments.profiles",
72 ),
73 #########
74 # Batch #
75 #########
76 url(
77 r"^api/v2/batch/departments/profiles/$",
78 views.BatchDepartmentsViewSet.as_view(
79 {
80 "get": "multiple_retrieve_profiles",
81 }
82 ),
83 name="department.batch",
84 ),
85 ########
86 # Edge #
87 ########
88 url(
89 r"^api/v2/edges/department_profile/$",
90 views.DepartmentProfileEdgeViewSet.as_view({"get": "list"}),
91 name="edge.department_profile",
92 ),
93 #############
94 # shortcuts #
95 #############
96 url(
97 r"^api/v2/shortcuts/departments/tops/$",
98 views.DepartmentViewSet.as_view({"get": "list_tops"}),
99 name="shortcuts.departments.list.tops",
100 ),
101 ]
102
[end of src/api/bkuser_core/departments/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py
--- a/src/api/bkuser_core/departments/urls.py
+++ b/src/api/bkuser_core/departments/urls.py
@@ -13,7 +13,7 @@
from . import views
-PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
+PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-\.]+)" % LOOKUP_FIELD_NAME
urlpatterns = [
url(
| {"golden_diff": "diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py\n--- a/src/api/bkuser_core/departments/urls.py\n+++ b/src/api/bkuser_core/departments/urls.py\n@@ -13,7 +13,7 @@\n \n from . import views\n \n-PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n+PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n \n urlpatterns = [\n url(\n", "issue": "\u90e8\u95e8\u67e5\u8be2\u63a5\u53e3 ?lookup_field=name\uff0c\u5f53\u90e8\u95e8\u540d\u79f0\u4e2d\u542b\u6709 \".\" \u65f6\u8fd4\u56de 404\n**\u7528\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898**\r\n\r\n\u8bf7\u7528\u7b80\u7ec3\u7684\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898\uff0c\u95ee\u9898\u63cf\u8ff0\u7684\u6e05\u6670\u7a0b\u5ea6\u51b3\u5b9a\u4e86\u95ee\u9898\u88ab\u89e3\u51b3\u7684\u6548\u7387\u3002\r\n\r\n**\u91cd\u73b0\u65b9\u6cd5**\r\n1. \u521b\u5efa\u4e00\u4e2a\u76ee\u5f55\uff0c\u540d\u5b57\u5305\u542b\u70b9\uff0c\u5982\u3010\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02\u3011\r\n2. \u4f7f\u7528api\u67e5\u8be2\uff0c http:://{host:port}/api/v2/departments/\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02/?lookup_field=name\r\n\r\n\u67e5\u8be2\u7ed3\u679c\u662f404\r\n\r\n\u8bf7\u63cf\u8ff0\u95ee\u9898\u91cd\u73b0\u7684\u65b9\u6cd5\uff0c\u5982\u679c\u4e0d\u65b9\u4fbf\u63cf\u8ff0\uff0c\u53ef\u4ee5\u901a\u8fc7\u622a\u56fe\u6216\u8005\u89c6\u9891\u8f85\u52a9\u3002\r\n\r\n**\u9884\u671f\u884c\u4e3a**\r\n\r\n\u9884\u671f\u7684\u6b63\u5e38\u884c\u4e3a\r\n\r\n**\u7248\u672c**\r\n- \u63d0\u4f9b\u7528\u6237\u7ba1\u7406\u7684\u5177\u4f53\u7248\u672c\u53f7\r\n- \u662f\u5426\u662f\u4f01\u4e1a\u7248\u95ee\u9898\uff1f\r\n\r\n**\u5982\u679c\u662f SaaS \u9875\u9762\u95ee\u9898\uff0c\u8bf7\u63d0\u4f9b\u4f7f\u7528\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u6d4f\u89c8\u5668\u4fe1\u606f**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**\u989d\u5916\u4fe1\u606f**\r\n\r\n\u4efb\u4f55\u4f60\u89c9\u5f97\u6709\u52a9\u4e8e\u95ee\u9898\u89e3\u51b3\u7684\u5185\u5bb9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}]} | 1,690 | 137 |
gh_patches_debug_2253 | rasdani/github-patches | git_diff | coala__coala-bears-1082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GofmtBear: Add advanced asciinema
The coala bear GofmtBear does not have a proper asciinema.
`gofmt` is a command-line tool that automatically fixes formatting and styling issues, rewriting code into Go's single canonical coding style.
I'm planning to use a working code sample filled with mixed indentation (spaces and tabs) and stray semicolons, and to demonstrate how gofmt automatically and correctly reformats it into that canonical Go style.
</issue>
<code>
[start of bears/go/GofmtBear.py]
1 from coalib.bearlib.abstractions.Linter import linter
2 from coalib.bears.requirements.GoRequirement import GoRequirement
3
4
5 @linter(executable='gofmt',
6 use_stdin=True,
7 output_format='corrected',
8 result_message='Formatting can be improved.')
9 class GofmtBear:
10 """
11 Suggest better formatting options in Go code. Basic checks like alignment,
12 indentation, and redundant parentheses are provided.
13
14 This is done using the ``gofmt`` utility. For more information visit
15 <https://golang.org/cmd/gofmt/>.
16 """
17 LANGUAGES = {'Go'}
18 REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}
19 AUTHORS = {'The coala developers'}
20 AUTHORS_EMAILS = {'[email protected]'}
21 LICENSE = 'AGPL-3.0'
22 CAN_FIX = {'Formatting'}
23
24 @staticmethod
25 def create_arguments(filename, file, config_file):
26 return ()
27
[end of bears/go/GofmtBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py
--- a/bears/go/GofmtBear.py
+++ b/bears/go/GofmtBear.py
@@ -20,6 +20,7 @@
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'
@staticmethod
def create_arguments(filename, file, config_file):
| {"golden_diff": "diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py\n--- a/bears/go/GofmtBear.py\n+++ b/bears/go/GofmtBear.py\n@@ -20,6 +20,7 @@\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n \n @staticmethod\n def create_arguments(filename, file, config_file):\n", "issue": "GofmtBear: Add advanced asciinema\nThe coala bear GofmtBear does not have a proper asciinema.\r\n\r\n`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.\r\n\r\nI'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.\r\n\n", "before_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}]} | 901 | 129 |
gh_patches_debug_9694 | rasdani/github-patches | git_diff | pretix__pretix-883 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rich_text: tel schema
When providing email links using the mailto schema in a rich-text description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), pretix creates the correct `<a>` tag. However, users also use their mobile phones. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the phone app is not opened.
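
(Editorial sketch of the expected behaviour; the exact attribute set is an assumption based on how `mailto:` links are handled:)

```python
# Hypothetical expected output of the rich_text filter for a tel: link:
rich_text("[Call us!](tel:+1-202-555-0102)")
# -> '<a href="tel:+1-202-555-0102" rel="noopener" target="_blank">Call us!</a>'
```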
</issue>
<code>
[start of src/pretix/base/templatetags/rich_text.py]
1 import urllib.parse
2
3 import bleach
4 import markdown
5 from bleach import DEFAULT_CALLBACKS
6 from django import template
7 from django.conf import settings
8 from django.core import signing
9 from django.urls import reverse
10 from django.utils.http import is_safe_url
11 from django.utils.safestring import mark_safe
12
13 register = template.Library()
14
15 ALLOWED_TAGS = [
16 'a',
17 'abbr',
18 'acronym',
19 'b',
20 'blockquote',
21 'br',
22 'code',
23 'em',
24 'i',
25 'li',
26 'ol',
27 'strong',
28 'ul',
29 'p',
30 'table',
31 'tbody',
32 'thead',
33 'tr',
34 'td',
35 'th',
36 'div',
37 'span',
38 'hr',
39 'h1',
40 'h2',
41 'h3',
42 'h4',
43 'h5',
44 'h6',
45 'pre',
46 # Update doc/user/markdown.rst if you change this!
47 ]
48
49 ALLOWED_ATTRIBUTES = {
50 'a': ['href', 'title'],
51 'abbr': ['title'],
52 'acronym': ['title'],
53 'table': ['width'],
54 'td': ['width', 'align'],
55 'div': ['class'],
56 'p': ['class'],
57 'span': ['class'],
58 # Update doc/user/markdown.rst if you change this!
59 }
60
61 ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']
62
63
64 def safelink_callback(attrs, new=False):
65 url = attrs.get((None, 'href'), '/')
66 if not is_safe_url(url) and not url.startswith('mailto:'):
67 signer = signing.Signer(salt='safe-redirect')
68 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
69 attrs[None, 'target'] = '_blank'
70 attrs[None, 'rel'] = 'noopener'
71 return attrs
72
73
74 def abslink_callback(attrs, new=False):
75 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))
76 attrs[None, 'target'] = '_blank'
77 attrs[None, 'rel'] = 'noopener'
78 return attrs
79
80
81 def markdown_compile(source):
82 return bleach.clean(
83 markdown.markdown(
84 source,
85 extensions=[
86 'markdown.extensions.sane_lists',
87 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails
88 ]
89 ),
90 tags=ALLOWED_TAGS,
91 attributes=ALLOWED_ATTRIBUTES,
92 protocols=ALLOWED_PROTOCOLS,
93 )
94
95
96 @register.filter
97 def rich_text(text: str, **kwargs):
98 """
99 Processes markdown and cleans HTML in a text input.
100 """
101 text = str(text)
102 body_md = bleach.linkify(
103 markdown_compile(text),
104 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])
105 )
106 return mark_safe(body_md)
107
[end of src/pretix/base/templatetags/rich_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py
--- a/src/pretix/base/templatetags/rich_text.py
+++ b/src/pretix/base/templatetags/rich_text.py
@@ -63,7 +63,7 @@
def safelink_callback(attrs, new=False):
url = attrs.get((None, 'href'), '/')
- if not is_safe_url(url) and not url.startswith('mailto:'):
+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):
signer = signing.Signer(salt='safe-redirect')
attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
attrs[None, 'target'] = '_blank'
| {"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -63,7 +63,7 @@\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n- if not is_safe_url(url) and not url.startswith('mailto:'):\n+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\nrich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. 
with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]} | 1,703 | 196 |
gh_patches_debug_37392 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stray WPS513 warning.
# Bug report
`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.
## What's wrong
The following snippet:
```python
if private_key is not None:
if isinstance(private_key, PKey):
key = private_key
else:
if isinstance(private_key, str): # WPS513 Found implicit `elif` condition
stream = io.StringIO()
stream.write(private_key)
stream.seek(0)
private_key = stream
key = RSAKey.from_private_key(private_key)
```
triggers a stray WPS513 warning. It seems the assignment to `key` after the inner `if` block is not taken into account.
## How it should be
`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.
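
(Editorial examples to make the expectation concrete, using the semantics described above:)

```python
# Should still be reported as an implicit `elif` — the inner `if` is the only statement in `else`:
if some:
    first()
else:
    if other:
        second()

# Should NOT be reported — the `else` block contains more code after the inner `if`:
if some:
    first()
else:
    if other:
        second()
    cleanup()
```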
</issue>
<code>
[start of wemake_python_styleguide/visitors/tokenize/conditions.py]
1 import tokenize
2 from typing import ClassVar, FrozenSet
3
4 from typing_extensions import final
5
6 from wemake_python_styleguide.violations.refactoring import (
7 ImplicitElifViolation,
8 )
9 from wemake_python_styleguide.visitors.base import BaseTokenVisitor
10
11
12 @final
13 class IfElseVisitor(BaseTokenVisitor):
14 """
15 Checks if tokens tokens.
16
17 We use ``tokenize`` instead of ``ast`` because
18
19 .. code:: python
20
21 if some:
22 ...
23 else:
24 if other:
25 ...
26
27 has the same ``ast`` representation as:
28
29 .. code:: python
30
31 if some:
32 ...
33 elif other:
34 ...
35
36 That's why we have to use ``tokenize`` to find
37 the raw tokens inside the text.
38
39 """
40
41 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((
42 tokenize.NEWLINE,
43 tokenize.NL,
44 tokenize.COLON,
45 tokenize.INDENT,
46 ))
47
48 def visit_name(self, token: tokenize.TokenInfo) -> None:
49 """
50 Checks that ``if`` nodes are defined correctly.
51
52 Raises:
53 ImplicitElifViolation
54
55 """
56 self._check_implicit_elif(token)
57
58 def _does_else_belong_to_if(self, start_index: int) -> bool:
59 previous_token = self.file_tokens[start_index - 1]
60
61 if previous_token.type != tokenize.DEDENT:
62 # This is not the first token on the line, which means that it can
63 # also be "embedded" else: x if A else B
64 return False
65
66 for token in reversed(self.file_tokens[:start_index - 1]):
67 if token.type != tokenize.NAME:
68 continue
69
70 # Here we rely upon an intuition that in Python else have to be
71 # on the same level (same indentation) as parent statement.
72 if token.start[1] == previous_token.start[1]:
73 return token.string in {'if', 'elif'}
74
75 return False
76
77 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
78 if token.string != 'else':
79 return
80
81 index = self.file_tokens.index(token)
82
83 # `else` token can belong also to `for` and `try/except` statement,
84 # which can trigger false positive for that violation.
85 if not self._does_else_belong_to_if(index):
86 return
87
88 # There's a bug in coverage, I am not sure how to make it work.
89 for next_token in self.file_tokens[index + 1:]: # pragma: no cover
90 if next_token.exact_type in self._allowed_token_types:
91 continue
92 elif next_token.string == 'if':
93 self.add_violation(ImplicitElifViolation(next_token))
94 return
95
[end of wemake_python_styleguide/visitors/tokenize/conditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py
--- a/wemake_python_styleguide/visitors/tokenize/conditions.py
+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py
@@ -1,5 +1,5 @@
import tokenize
-from typing import ClassVar, FrozenSet
+from typing import ClassVar, FrozenSet, Sequence
from typing_extensions import final
@@ -74,21 +74,63 @@
return False
- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
- if token.string != 'else':
- return
+ def _if_has_code_below(
+ self,
+ remaining_tokens: Sequence[tokenize.TokenInfo],
+ ) -> bool:
+ """
+ Checks code immediately below an if statement to remove false positives.
+
+ Checks that, below an if that comes immediately after an else, there is
+ more code to be considered so as not to throw an incorrect violation.
+ """
+ index = 1
+
+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:
+ index += 1
+
+ context_count = 1
- index = self.file_tokens.index(token)
+ while context_count:
+ next_token = remaining_tokens[index]
+ if next_token.exact_type == tokenize.INDENT:
+ context_count += 1
+ if next_token.exact_type == tokenize.DEDENT:
+ context_count -= 1
+ index += 1
+
+ return remaining_tokens[index].exact_type != tokenize.DEDENT
+
+ def _check_complex_else(
+ self,
+ tokens: Sequence[tokenize.TokenInfo],
+ current_token: tokenize.TokenInfo,
+ index: int,
+ ) -> None:
+ complex_else = self._if_has_code_below(tokens[index + 1:])
+ if not complex_else:
+ self.add_violation(ImplicitElifViolation(current_token))
+
+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:
+ is_not_else = token.string != 'else'
# `else` token can belong also to `for` and `try/except` statement,
# which can trigger false positive for that violation.
- if not self._does_else_belong_to_if(index):
+ belongs_to_if = self._does_else_belong_to_if(index)
+
+ return is_not_else or not belongs_to_if
+
+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
+ token_index = self.file_tokens.index(token)
+
+ if self._is_invalid_token(token_index, token):
return
# There's a bug in coverage, I am not sure how to make it work.
- for next_token in self.file_tokens[index + 1:]: # pragma: no cover
+ next_tokens = self.file_tokens[token_index + 1:]
+ for index, next_token in enumerate(next_tokens): # pragma: no cover
if next_token.exact_type in self._allowed_token_types:
continue
elif next_token.string == 'if':
- self.add_violation(ImplicitElifViolation(next_token))
+ self._check_complex_else(next_tokens, next_token, index)
return
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -1,5 +1,5 @@\n import tokenize\n-from typing import ClassVar, FrozenSet\n+from typing import ClassVar, FrozenSet, Sequence\n \n from typing_extensions import final\n \n@@ -74,21 +74,63 @@\n \n return False\n \n- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n- if token.string != 'else':\n- return\n+ def _if_has_code_below(\n+ self,\n+ remaining_tokens: Sequence[tokenize.TokenInfo],\n+ ) -> bool:\n+ \"\"\"\n+ Checks code immediately below an if statement to remove false positives.\n+\n+ Checks that, below an if that comes immediately after an else, there is\n+ more code to be considered so as not to throw an incorrect violation.\n+ \"\"\"\n+ index = 1\n+\n+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n+ index += 1\n+\n+ context_count = 1\n \n- index = self.file_tokens.index(token)\n+ while context_count:\n+ next_token = remaining_tokens[index]\n+ if next_token.exact_type == tokenize.INDENT:\n+ context_count += 1\n+ if next_token.exact_type == tokenize.DEDENT:\n+ context_count -= 1\n+ index += 1\n+\n+ return remaining_tokens[index].exact_type != tokenize.DEDENT\n+\n+ def _check_complex_else(\n+ self,\n+ tokens: Sequence[tokenize.TokenInfo],\n+ current_token: tokenize.TokenInfo,\n+ index: int,\n+ ) -> None:\n+ complex_else = self._if_has_code_below(tokens[index + 1:])\n+ if not complex_else:\n+ self.add_violation(ImplicitElifViolation(current_token))\n+\n+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n+ is_not_else = token.string != 'else'\n \n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n- if not self._does_else_belong_to_if(index):\n+ belongs_to_if = self._does_else_belong_to_if(index)\n+\n+ return is_not_else or not belongs_to_if\n+\n+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n+ token_index = self.file_tokens.index(token)\n+\n+ if self._is_invalid_token(token_index, token):\n return\n \n # There's a bug in coverage, I am not sure how to make it work.\n- for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n+ next_tokens = self.file_tokens[token_index + 1:]\n+ for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n- self.add_violation(ImplicitElifViolation(next_token))\n+ self._check_complex_else(next_tokens, next_token, index)\n return\n", "issue": "Stray WPS513 warning.\n# Bug report\r\n\r\n`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.\r\n\r\n## What's wrong\r\n\r\nThe following snippet:\r\n\r\n```python\r\nif private_key is not None:\r\n if isinstance(private_key, PKey):\r\n key = private_key\r\n else:\r\n if isinstance(private_key, str): # WPS513 Found implicit `elif` condition\r\n stream = io.StringIO()\r\n stream.write(private_key)\r\n stream.seek(0)\r\n private_key = stream\r\n key = RSAKey.from_private_key(private_key)\r\n```\r\n\r\ntriggers a stray WPS513 warning. 
Seems like the assignment to `key` is not taken into account.\r\n\r\n## How is that should be\r\n\r\n`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.\n", "before_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n if not self._does_else_belong_to_if(index):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]} | 1,554 | 758 |
gh_patches_debug_20297 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1063 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Snippets ICal AttributeError exception
Snippets ICal feed raises:
`AttributeError: 'ASRSnippet' object has no attribute 'locales'`
Fix the error and add tests to catch this at the unit-test level. We did catch this at the acceptance-test level, though :sweat_smile: 
</issue>
<code>
[start of snippets/base/urls.py]
1 from django.urls import path
2
3 from watchman import views as watchman_views
4
5 from snippets.base import views
6 from snippets.base import feed
7
8
9 urlpatterns = [
10 path('', views.HomeView.as_view()),
11 path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'
12 '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',
13 views.fetch_snippets, name='base.fetch_snippets'),
14 path('preview/', views.preview_snippet, name='base.preview'),
15 path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),
16 path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),
17 path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),
18 path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
19 path('healthz/', watchman_views.ping, name='watchman.ping'),
20 path('readiness/', watchman_views.status, name='watchman.status'),
21 path('feeds/snippets.ics', feed.SnippetsFeed()),
22 ]
23
[end of snippets/base/urls.py]
[start of snippets/base/feed.py]
1 import operator
2 from datetime import timedelta
3 from distutils.util import strtobool
4 from textwrap import dedent
5 from urllib.parse import urlparse
6
7 from django.conf import settings
8 from django.db.models import Q
9
10 import django_filters
11 from django_ical.views import ICalFeed
12
13 from snippets.base import models
14
15
16 class ASRSnippetFilter(django_filters.FilterSet):
17 name = django_filters.CharFilter(lookup_expr='icontains')
18 locale = django_filters.CharFilter(method='filter_locale')
19 only_scheduled = django_filters.ChoiceFilter(
20 method='filter_scheduled', choices=(('true', 'Yes'),
21 ('false', 'No'),
22 ('all', 'All')))
23
24 def filter_locale(self, queryset, name, value):
25 if not value:
26 return queryset
27
28 locales = value.split(',')
29 return queryset.filter(
30 operator.or_(
31 *[Q(locale__code=',{},'.format(locale)) for locale in locales]
32 )
33 )
34
35 def filter_scheduled(self, queryset, name, value):
36 if value == 'all':
37 return queryset
38
39 value = strtobool(value)
40
41 if value:
42 return queryset.exclude(publish_start=None, publish_end=None)
43
44 return queryset.filter(publish_start=None, publish_end=None)
45
46 class Meta:
47 model = models.ASRSnippet
48 fields = []
49
50
51 class SnippetsFeed(ICalFeed):
52 timezone = 'UTC'
53 title = 'Snippets'
54
55 def __call__(self, request, *args, **kwargs):
56 self.request = request
57 return super().__call__(request, *args, **kwargs)
58
59 @property
60 def product_id(self):
61 return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,
62 self.request.GET.urlencode())
63
64 def items(self):
65 queryset = (models.ASRSnippet.objects
66 .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])
67 .order_by('publish_start'))
68 filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)
69 return filtr.qs
70
71 def item_title(self, item):
72 return item.name
73
74 def item_link(self, item):
75 return item.get_admin_url()
76
77 def item_description(self, item):
78 description = dedent('''\
79 Channels: {}
80 Locales: {}'
81 Preview Link: {}
82 '''.format(', '.join(item.channels),
83 ', '.join(item.locales.values_list('name', flat=True)),
84 item.get_preview_url()))
85 return description
86
87 def item_start_datetime(self, item):
88 return item.publish_start or item.created
89
90 def item_end_datetime(self, item):
91 return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))
92
93 def item_created(self, item):
94 return item.created
95
96 def item_updateddate(self, item):
97 return item.modified
98
[end of snippets/base/feed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/snippets/base/feed.py b/snippets/base/feed.py
--- a/snippets/base/feed.py
+++ b/snippets/base/feed.py
@@ -77,10 +77,10 @@
def item_description(self, item):
description = dedent('''\
Channels: {}
- Locales: {}'
+ Locale: {}'
Preview Link: {}
'''.format(', '.join(item.channels),
- ', '.join(item.locales.values_list('name', flat=True)),
+ item.locale,
item.get_preview_url()))
return description
diff --git a/snippets/base/urls.py b/snippets/base/urls.py
--- a/snippets/base/urls.py
+++ b/snippets/base/urls.py
@@ -18,5 +18,5 @@
path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
path('healthz/', watchman_views.ping, name='watchman.ping'),
path('readiness/', watchman_views.status, name='watchman.status'),
- path('feeds/snippets.ics', feed.SnippetsFeed()),
+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),
]
| {"golden_diff": "diff --git a/snippets/base/feed.py b/snippets/base/feed.py\n--- a/snippets/base/feed.py\n+++ b/snippets/base/feed.py\n@@ -77,10 +77,10 @@\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n- Locales: {}'\n+ Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n- ', '.join(item.locales.values_list('name', flat=True)),\n+ item.locale,\n item.get_preview_url()))\n return description\n \ndiff --git a/snippets/base/urls.py b/snippets/base/urls.py\n--- a/snippets/base/urls.py\n+++ b/snippets/base/urls.py\n@@ -18,5 +18,5 @@\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n- path('feeds/snippets.ics', feed.SnippetsFeed()),\n+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n ]\n", "issue": "Fix Snippets ICal AttributeError exception \nSnippets ICal feed raises:\r\n\r\n`AttributeError: 'ASRSnippet' object has no attribute 'locales'`\r\n\r\nFix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile: \r\n\n", "before_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed()),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass 
SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locales: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n ', '.join(item.locales.values_list('name', flat=True)),\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}]} | 1,723 | 270 |
gh_patches_debug_29324 | rasdani/github-patches | git_diff | dask__distributed-228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No such file or directory "bokeh" - in release 1.9.2 (conda install)
Get this error in my logs:
```
scheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786
scheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI
scheduler_1 | Traceback (most recent call last):
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 950, in __init__
scheduler_1 | restore_signals, start_new_session)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 1544, in _execute_child
scheduler_1 | raise child_exception_type(errno_num, err_msg)
scheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'
scheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler
scheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700
scheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700
```
</issue>
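
The traceback above is the generic symptom of launching an executable that is not on `PATH`. A minimal, hypothetical reproduction (not part of the original report; it simply assumes the `bokeh` CLI is absent from the environment):

```python
# Hypothetical reproduction sketch: subprocess.Popen raises FileNotFoundError
# when the named executable cannot be located on PATH.
import shutil
import subprocess

print(shutil.which("bokeh"))              # None when the bokeh CLI is not installed/visible
subprocess.Popen(["bokeh", "--version"])  # raises FileNotFoundError in that case
```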
<code>
[start of distributed/cli/dscheduler.py]
1 from __future__ import print_function, division, absolute_import
2
3 import logging
4 import os
5 import socket
6 import subprocess
7 from sys import argv, exit
8 from time import sleep
9
10 import click
11
12 import distributed
13 from distributed import Scheduler
14 from distributed.utils import get_ip
15 from distributed.http import HTTPScheduler
16 from distributed.cli.utils import check_python_3
17 from tornado.ioloop import IOLoop
18
19 logger = logging.getLogger('distributed.scheduler')
20
21 ip = get_ip()
22
23 import signal
24
25 bokeh_proc = [False]
26
27
28 def handle_signal(sig, frame):
29 if bokeh_proc[0]:
30 bokeh_proc[0].terminate()
31 IOLoop.instance().add_callback(IOLoop.instance().stop)
32
33 signal.signal(signal.SIGINT, handle_signal)
34 signal.signal(signal.SIGTERM, handle_signal)
35
36
37 @click.command()
38 @click.argument('center', type=str, default='')
39 @click.option('--port', type=int, default=8786, help="Serving port")
40 @click.option('--http-port', type=int, default=9786, help="HTTP port")
41 @click.option('--bokeh-port', type=int, default=8787, help="HTTP port")
42 @click.option('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,
43 required=False, help="Launch Bokeh Web UI")
44 @click.option('--host', type=str, default=ip,
45 help="Serving host defaults to %s" % ip)
46 @click.option('--show/--no-show', default=False, help="Show web UI")
47 def main(center, host, port, http_port, bokeh_port, show, _bokeh):
48 ip = socket.gethostbyname(host)
49 loop = IOLoop.current()
50 scheduler = Scheduler(center, ip=ip,
51 services={('http', http_port): HTTPScheduler})
52 if center:
53 loop.run_sync(scheduler.sync_center)
54 scheduler.start(port)
55
56 if _bokeh:
57 try:
58 import bokeh
59 import distributed.bokeh
60 hosts = ['%s:%d' % (h, bokeh_port) for h in
61 ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]
62 dirname = os.path.dirname(distributed.__file__)
63 paths = [os.path.join(dirname, 'bokeh', name)
64 for name in ['status', 'tasks']]
65 args = (['bokeh', 'serve'] + paths +
66 ['--log-level', 'warning',
67 '--check-unused-sessions=50',
68 '--unused-session-lifetime=1',
69 '--port', str(bokeh_port)] +
70 sum([['--host', host] for host in hosts], []))
71 if show:
72 args.append('--show')
73 bokeh_proc[0] = subprocess.Popen(args)
74
75 logger.info(" Start Bokeh UI at: http://%s:%d/status/"
76 % (ip, bokeh_port))
77 except ImportError:
78 logger.info("Please install Bokeh to get Web UI")
79 except Exception as e:
80 logger.warn("Could not start Bokeh web UI", exc_info=True)
81
82 loop.start()
83 loop.close()
84 scheduler.stop()
85 bokeh_proc[0].terminate()
86
87 logger.info("End scheduler at %s:%d", ip, port)
88
89
90 def go():
91 check_python_3()
92 main()
93
94
95 if __name__ == '__main__':
96 go()
97
[end of distributed/cli/dscheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py
--- a/distributed/cli/dscheduler.py
+++ b/distributed/cli/dscheduler.py
@@ -1,9 +1,9 @@
from __future__ import print_function, division, absolute_import
import logging
+import multiprocessing
import os
import socket
-import subprocess
from sys import argv, exit
from time import sleep
@@ -22,12 +22,8 @@
import signal
-bokeh_proc = [False]
-
def handle_signal(sig, frame):
- if bokeh_proc[0]:
- bokeh_proc[0].terminate()
IOLoop.instance().add_callback(IOLoop.instance().stop)
signal.signal(signal.SIGINT, handle_signal)
@@ -70,7 +66,10 @@
sum([['--host', host] for host in hosts], []))
if show:
args.append('--show')
- bokeh_proc[0] = subprocess.Popen(args)
+ from bokeh.command.bootstrap import main
+ proc = multiprocessing.Process(target=main, args=(args,))
+ proc.daemon = True
+ proc.start()
logger.info(" Start Bokeh UI at: http://%s:%d/status/"
% (ip, bokeh_port))
@@ -82,7 +81,7 @@
loop.start()
loop.close()
scheduler.stop()
- bokeh_proc[0].terminate()
+ proc.terminate()
logger.info("End scheduler at %s:%d", ip, port)
| {"golden_diff": "diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py\n--- a/distributed/cli/dscheduler.py\n+++ b/distributed/cli/dscheduler.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, division, absolute_import\n \n import logging\n+import multiprocessing\n import os\n import socket\n-import subprocess\n from sys import argv, exit\n from time import sleep\n \n@@ -22,12 +22,8 @@\n \n import signal\n \n-bokeh_proc = [False]\n-\n \n def handle_signal(sig, frame):\n- if bokeh_proc[0]:\n- bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n \n signal.signal(signal.SIGINT, handle_signal)\n@@ -70,7 +66,10 @@\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n- bokeh_proc[0] = subprocess.Popen(args)\n+ from bokeh.command.bootstrap import main\n+ proc = multiprocessing.Process(target=main, args=(args,))\n+ proc.daemon = True\n+ proc.start()\n \n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n@@ -82,7 +81,7 @@\n loop.start()\n loop.close()\n scheduler.stop()\n- bokeh_proc[0].terminate()\n+ proc.terminate()\n \n logger.info(\"End scheduler at %s:%d\", ip, port)\n", "issue": "No such file or directory \"bokeh\" - in release 1.9.2 (conda install)\nGet this error in my logs:\n\n```\nscheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786\nscheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI\nscheduler_1 | Traceback (most recent call last):\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py\", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 950, in __init__\nscheduler_1 | restore_signals, start_new_session)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 1544, in _execute_child\nscheduler_1 | raise child_exception_type(errno_num, err_msg)\nscheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'\nscheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler\nscheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700\nscheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport socket\nimport subprocess\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\nbokeh_proc = [False]\n\n\ndef handle_signal(sig, frame):\n if bokeh_proc[0]:\n bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, 
help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n bokeh_proc[0] = subprocess.Popen(args)\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n bokeh_proc[0].terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}]} | 1,800 | 338 |
gh_patches_debug_12036 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix TFExample codec
</issue>
<code>
[start of record_codec/__init__.py]
[end of record_codec/__init__.py]
[start of record_codec/tf_example_codec.py]
1 from enum import Enum
2 import tensorflow as tf
3
4
5 class TFExampleCodec(object):
6 def __init__(self, feature_columns):
7 self._f_name2type = {}
8 self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
9 for f_col in feature_columns:
10 self._f_name2type[f_col.key] = f_col.dtype
11
12 def encode(self, example):
13 f_dict = {}
14 for f_name, f_value in example:
15 f_type = self._f_name2type[f_name]
16 if f_type == tf.string:
17 f_dict[f_name] = tf.train.Feature(
18 bytes_list=tf.train.BytesList(value=f_value)
19 )
20 elif f_type == tf.float32:
21 f_dict[f_name] = tf.train.Feature(
22 float_list=tf.train.FloatList(value=f_value.flatten())
23 )
24 elif f_type == tf.int64:
25 f_dict[f_name] = tf.train.Feature(
26 int64_list=tf.train.Int64List(value=f_value.flatten())
27 )
28 else:
29 raise ValueError(
30 "not supported tensorflow data type: " + str(f_type)
31 )
32
33 example = tf.train.Example(features=tf.train.Features(feature=f_dict))
34 return example.SerializeToString()
35
36 def decode(self, raw):
37 return tf.parse_single_example(raw, self._example_spec)
38
[end of record_codec/tf_example_codec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/record_codec/__init__.py b/record_codec/__init__.py
--- a/record_codec/__init__.py
+++ b/record_codec/__init__.py
@@ -0,0 +1,3 @@
+from record_codec.tf_example_codec import TFExampleCodec
+
+__all__=[TFExampleCodec]
diff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py
--- a/record_codec/tf_example_codec.py
+++ b/record_codec/tf_example_codec.py
@@ -4,10 +4,8 @@
class TFExampleCodec(object):
def __init__(self, feature_columns):
- self._f_name2type = {}
self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
- for f_col in feature_columns:
- self._f_name2type[f_col.key] = f_col.dtype
+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}
def encode(self, example):
f_dict = {}
| {"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -0,0 +1,3 @@\n+from record_codec.tf_example_codec import TFExampleCodec\n+\n+__all__=[TFExampleCodec]\ndiff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py\n--- a/record_codec/tf_example_codec.py\n+++ b/record_codec/tf_example_codec.py\n@@ -4,10 +4,8 @@\n \n class TFExampleCodec(object):\n def __init__(self, feature_columns):\n- self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n- for f_col in feature_columns:\n- self._f_name2type[f_col.key] = f_col.dtype\n+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n \n def encode(self, example):\n f_dict = {}\n", "issue": "Fix TFExample codec\n\n", "before_files": [{"content": "", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n for f_col in feature_columns:\n self._f_name2type[f_col.key] = f_col.dtype\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}]} | 915 | 234 |
gh_patches_debug_18359 | rasdani/github-patches | git_diff | pantsbuild__pants-6361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`pants_requirement` should include the `; python_version<'3'` environment marker
Until pants can run on python 3, the `pants_requirement` should add an environment marker to restrict the python environment it applies to. Once pants only supports `>=3.5`, a corresponding change should be made to the environment marker.
</issue>
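
For readers unfamiliar with environment markers: a PEP 508 requirement places the marker after a semicolon, and the requirement only applies on interpreters that satisfy it. A minimal sketch (the pinned version and the use of the `packaging` library are illustrative assumptions, not part of the issue):

```python
# Illustrative sketch of a PEP 508 environment marker; the version pin is a placeholder.
from packaging.requirements import Requirement

req = Requirement("pantsbuild.pants==1.9.0; python_version>='2.7' and python_version<'3'")
print(req.marker)             # python_version >= "2.7" and python_version < "3"
print(req.marker.evaluate())  # True only when evaluated on a matching (Python 2.7) interpreter
```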
<code>
[start of src/python/pants/backend/python/pants_requirement.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import object
9
10 from pants.backend.python.python_requirement import PythonRequirement
11 from pants.base.build_environment import pants_version
12
13
14 class PantsRequirement(object):
15 """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.
16
17 This requirement is useful for custom plugin authors who want to build and test their plugin with
18 pants itself. Using the resulting target as a dependency of their plugin target ensures the
19 dependency stays true to the surrounding repo's version of pants.
20
21 NB: The requirement generated is for official pants releases on pypi; so may not be appropriate
22 for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.
23
24 :API: public
25 """
26
27 def __init__(self, parse_context):
28 self._parse_context = parse_context
29
30 def __call__(self, name=None):
31 """
32 :param string name: The name to use for the target, defaults to the parent dir name.
33 """
34 name = name or os.path.basename(self._parse_context.rel_path)
35 requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
36 self._parse_context.create_object('python_requirement_library',
37 name=name,
38 requirements=[requirement])
39
[end of src/python/pants/backend/python/pants_requirement.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py
--- a/src/python/pants/backend/python/pants_requirement.py
+++ b/src/python/pants/backend/python/pants_requirement.py
@@ -32,7 +32,14 @@
:param string name: The name to use for the target, defaults to the parent dir name.
"""
name = name or os.path.basename(self._parse_context.rel_path)
- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
+
+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of
+ # https://github.com/pantsbuild/pants/issues/6062
+ env_marker = "python_version>='2.7' and python_version<'3'"
+
+ requirement = PythonRequirement(requirement="pantsbuild.pants=={version} ; {env_marker}"
+ .format(version=pants_version(), env_marker=env_marker))
+
self._parse_context.create_object('python_requirement_library',
name=name,
requirements=[requirement])
| {"golden_diff": "diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py\n--- a/src/python/pants/backend/python/pants_requirement.py\n+++ b/src/python/pants/backend/python/pants_requirement.py\n@@ -32,7 +32,14 @@\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n+\n+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n+ # https://github.com/pantsbuild/pants/issues/6062\n+ env_marker = \"python_version>='2.7' and python_version<'3'\"\n+\n+ requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n+ .format(version=pants_version(), env_marker=env_marker))\n+\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "issue": "`pants_requirement` should include the `; python_version<'3'` environment marker\nUntil pants can run on python 3, the `pants_requirement` should add an environment marker to restict the python environment it applies to. After pants only supports `>=3.5` a change should be made to the environment marker.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}]} | 1,016 | 254 |
gh_patches_debug_19531 | rasdani/github-patches | git_diff | fidals__shopelectro-733 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adapt page usage to the new refarm pages interface
At https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.
Adapt site code to the new interface.
</issue>
<code>
[start of shopelectro/context.py]
1 from functools import partial
2
3 from django.conf import settings
4 from django.shortcuts import get_object_or_404
5
6 from catalog import newcontext
7 from images.models import Image
8 from pages import models as pages_models, newcontext as pages_newcontext
9 from shopelectro import models, request_data
10
11
12 # @todo #255:60m Improve `SortingOption` interface.
13 # Now it's located in context and this is wrong.
14 # Maybe refactor `CATEGORY_SORTING_OPTIONS`.
15 class SortingOption:
16 def __init__(self, index=0):
17 options = settings.CATEGORY_SORTING_OPTIONS[index]
18 self.label = options['label']
19 self.field = options['field']
20 self.direction = options['direction']
21
22 @property
23 def directed_field(self):
24 return self.direction + self.field
25
26
27 class Page(newcontext.Context):
28
29 def __init__(self, page, tags: newcontext.Tags):
30 self._page = page
31 self._tags = tags
32
33 def context(self):
34 def template_context(page, tag_titles, tags):
35 return {
36 'page': page,
37 'tag_titles': tag_titles,
38 'tags': tags,
39 }
40
41 tags_qs = self._tags.qs()
42 self._page.get_template_render_context = partial(
43 template_context, self._page, tags_qs.as_title(), tags_qs
44 )
45
46 return {
47 'page': self._page,
48 }
49
50
51 class Catalog(newcontext.Context):
52
53 def __init__(self, request_data_: request_data.Catalog):
54 self.request_data = request_data_
55
56 @property
57 def page(self):
58 return get_object_or_404(
59 pages_models.ModelPage,
60 slug=self.request_data.slug
61 )
62
63 @property
64 def category(self):
65 return self.page.model
66
67 def context(self) -> dict:
68 all_tags = newcontext.Tags(models.Tag.objects.all())
69
70 selected_tags = newcontext.tags.ParsedTags(
71 tags=all_tags,
72 raw_tags=self.request_data.tags,
73 )
74 if self.request_data.tags:
75 selected_tags = newcontext.tags.Checked404Tags(selected_tags)
76
77 products = (
78 models.Product.objects.active()
79 .filter_descendants(self.category)
80 .tagged_or_all(selected_tags.qs())
81 .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)
82 )
83
84 """
85 We have to use separated variable for pagination.
86
87 Because paginated QuerySet can not used as QuerySet.
88 It's not the most strong place of Django ORM, of course.
89 :return: ProductsContext with paginated QuerySet inside
90 """
91 # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.
92 # Rename Checked404Tags to ExistingOr404.
93 paginated = newcontext.products.PaginatedProducts(
94 products=products,
95 url=self.request_data.request.path,
96 page_number=self.request_data.pagination_page_number,
97 per_page=self.request_data.pagination_per_page,
98 )
99
100 images = newcontext.products.ProductImages(paginated.products, Image.objects.all())
101 brands = newcontext.products.ProductBrands(paginated.products, all_tags)
102 grouped_tags = newcontext.tags.GroupedTags(
103 tags=newcontext.tags.TagsByProducts(all_tags, products)
104 )
105 page = Page(self.page, selected_tags)
106 category = newcontext.category.Context(self.category)
107 params = {
108 'view_type': self.request_data.get_view_type(),
109 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),
110 'limits': settings.CATEGORY_STEP_MULTIPLIERS,
111 'sort': self.request_data.sorting_index,
112 }
113
114 return {
115 **params,
116 **pages_newcontext.Contexts([
117 page, category, paginated,
118 images, brands, grouped_tags
119 ]).context()
120 }
121
[end of shopelectro/context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/context.py b/shopelectro/context.py
--- a/shopelectro/context.py
+++ b/shopelectro/context.py
@@ -1,5 +1,3 @@
-from functools import partial
-
from django.conf import settings
from django.shortcuts import get_object_or_404
@@ -31,18 +29,15 @@
self._tags = tags
def context(self):
- def template_context(page, tag_titles, tags):
- return {
- 'page': page,
- 'tag_titles': tag_titles,
- 'tags': tags,
- }
-
tags_qs = self._tags.qs()
- self._page.get_template_render_context = partial(
- template_context, self._page, tags_qs.as_title(), tags_qs
- )
-
+ # use dirty patch here, because it's the most simple method
+ # to make shared templates work.
+ # For example `templates/layout/metadata.html`.
+ self._page.display = {
+ 'page': self._page,
+ 'tag_titles': tags_qs.as_title(),
+ 'tags': tags_qs,
+ }
return {
'page': self._page,
}
| {"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -1,5 +1,3 @@\n-from functools import partial\n-\n from django.conf import settings\n from django.shortcuts import get_object_or_404\n \n@@ -31,18 +29,15 @@\n self._tags = tags\n \n def context(self):\n- def template_context(page, tag_titles, tags):\n- return {\n- 'page': page,\n- 'tag_titles': tag_titles,\n- 'tags': tags,\n- }\n-\n tags_qs = self._tags.qs()\n- self._page.get_template_render_context = partial(\n- template_context, self._page, tags_qs.as_title(), tags_qs\n- )\n-\n+ # use dirty patch here, because it's the most simple method\n+ # to make shared templates work.\n+ # For example `templates/layout/metadata.html`.\n+ self._page.display = {\n+ 'page': self._page,\n+ 'tag_titles': tags_qs.as_title(),\n+ 'tags': tags_qs,\n+ }\n return {\n 'page': self._page,\n }\n", "issue": "Adapt page usage to the new refarm pages interface\nAt https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.\r\n\r\nAdapt site code to the new interface.\r\n\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags 
to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}]} | 1,675 | 272 |
gh_patches_debug_41380 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Jinja2 async integration
Since https://github.com/pallets/jinja/issues/765, Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).
We need to update to only instrument at the point the `render_async` method becomes available.
</issue>
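
A small sketch of the timing issue described above (assuming Jinja2 >= 2.11 on Python 3.6+, matching the behaviour referenced in the linked Jinja issue):

```python
# Sketch only: render_async is attached to Template lazily, so it is absent at import
# time and only appears once async support has been loaded on demand.
import sys
import jinja2

print(hasattr(jinja2.Template, "render_async"))  # False straight after import
jinja2.Environment(enable_async=True)            # lazily imports jinja2.asyncsupport
print("jinja2.asyncsupport" in sys.modules)      # True once an async environment exists
print(hasattr(jinja2.Template, "render_async"))  # now True
```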
<code>
[start of src/scout_apm/instruments/jinja2.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5
6 import wrapt
7
8 from scout_apm.core.tracked_request import TrackedRequest
9
10 try:
11 from jinja2 import Template
12 except ImportError: # pragma: no cover
13 Template = None
14
15 # The async_ module can only be shipped on Python 3.6+
16 try:
17 from scout_apm.async_.instruments.jinja2 import wrapped_render_async
18 except ImportError:
19 wrapped_render_async = None
20
21
22 logger = logging.getLogger(__name__)
23
24
25 have_patched_template_render = False
26 have_patched_template_render_async = False
27
28
29 def ensure_installed():
30 global have_patched_template_render
31 global have_patched_template_render_async
32
33 logger.info("Ensuring Jinja2 instrumentation is installed.")
34
35 if Template is None:
36 logger.info("Unable to import jinja2.Template")
37 return
38
39 if not have_patched_template_render:
40 try:
41 Template.render = wrapped_render(Template.render)
42 except Exception as exc:
43 logger.warning(
44 "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
45 )
46 else:
47 have_patched_template_render = True
48
49 if not have_patched_template_render_async and wrapped_render_async is not None:
50 try:
51 Template.render_async = wrapped_render_async(Template.render_async)
52 except Exception as exc:
53 logger.warning(
54 "Unable to instrument jinja2.Template.render_async: %r",
55 exc,
56 exc_info=exc,
57 )
58 else:
59 have_patched_template_render_async = True
60
61
62 @wrapt.decorator
63 def wrapped_render(wrapped, instance, args, kwargs):
64 tracked_request = TrackedRequest.instance()
65 span = tracked_request.start_span(operation="Template/Render")
66 span.tag("name", instance.name)
67 try:
68 return wrapped(*args, **kwargs)
69 finally:
70 tracked_request.stop_span()
71
[end of src/scout_apm/instruments/jinja2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py
--- a/src/scout_apm/instruments/jinja2.py
+++ b/src/scout_apm/instruments/jinja2.py
@@ -2,11 +2,17 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
+import sys
import wrapt
from scout_apm.core.tracked_request import TrackedRequest
+try:
+ from jinja2 import Environment
+except ImportError: # pragma: no cover
+ Environment = None
+
try:
from jinja2 import Template
except ImportError: # pragma: no cover
@@ -22,13 +28,14 @@
logger = logging.getLogger(__name__)
+have_patched_environment_init = False
have_patched_template_render = False
have_patched_template_render_async = False
def ensure_installed():
+ global have_patched_environment_init
global have_patched_template_render
- global have_patched_template_render_async
logger.info("Ensuring Jinja2 instrumentation is installed.")
@@ -36,27 +43,27 @@
logger.info("Unable to import jinja2.Template")
return
- if not have_patched_template_render:
+ if not have_patched_environment_init:
try:
- Template.render = wrapped_render(Template.render)
+ Environment.__init__ = wrapped_environment_init(Environment.__init__)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ "Unable to instrument jinja2.Environment.__init__: %r",
+ exc,
+ exc_info=exc,
)
else:
- have_patched_template_render = True
+ have_patched_environment_init = True
- if not have_patched_template_render_async and wrapped_render_async is not None:
+ if not have_patched_template_render:
try:
- Template.render_async = wrapped_render_async(Template.render_async)
+ Template.render = wrapped_render(Template.render)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render_async: %r",
- exc,
- exc_info=exc,
+ "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
)
else:
- have_patched_template_render_async = True
+ have_patched_template_render = True
@wrapt.decorator
@@ -68,3 +75,33 @@
return wrapped(*args, **kwargs)
finally:
tracked_request.stop_span()
+
+
[email protected]
+def wrapped_environment_init(wrapped, instance, args, kwargs):
+ """
+ Delayed wrapping of render_async(), since Template won't have this method
+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
+ done conditionally in Environment.__init__:
+ https://github.com/pallets/jinja/issues/765
+ """
+ global have_patched_template_render_async
+ result = wrapped(*args, **kwargs)
+
+ if (
+ wrapped_render_async is not None
+ and not have_patched_template_render_async
+ and "jinja2.asyncsupport" in sys.modules
+ ):
+ try:
+ Template.render_async = wrapped_render_async(Template.render_async)
+ except Exception as exc:
+ logger.warning(
+ "Unable to instrument jinja2.Template.render_async: %r",
+ exc,
+ exc_info=exc,
+ )
+ else:
+ have_patched_template_render_async = True
+
+ return result
| {"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -2,11 +2,17 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n+import sys\n \n import wrapt\n \n from scout_apm.core.tracked_request import TrackedRequest\n \n+try:\n+ from jinja2 import Environment\n+except ImportError: # pragma: no cover\n+ Environment = None\n+\n try:\n from jinja2 import Template\n except ImportError: # pragma: no cover\n@@ -22,13 +28,14 @@\n logger = logging.getLogger(__name__)\n \n \n+have_patched_environment_init = False\n have_patched_template_render = False\n have_patched_template_render_async = False\n \n \n def ensure_installed():\n+ global have_patched_environment_init\n global have_patched_template_render\n- global have_patched_template_render_async\n \n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n \n@@ -36,27 +43,27 @@\n logger.info(\"Unable to import jinja2.Template\")\n return\n \n- if not have_patched_template_render:\n+ if not have_patched_environment_init:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Unable to instrument jinja2.Environment.__init__: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_environment_init = True\n \n- if not have_patched_template_render_async and wrapped_render_async is not None:\n+ if not have_patched_template_render:\n try:\n- Template.render_async = wrapped_render_async(Template.render_async)\n+ Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render_async: %r\",\n- exc,\n- exc_info=exc,\n+ \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n- have_patched_template_render_async = True\n+ have_patched_template_render = True\n \n \n @wrapt.decorator\n@@ -68,3 +75,33 @@\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n+\n+\[email protected]\n+def wrapped_environment_init(wrapped, instance, args, kwargs):\n+ \"\"\"\n+ Delayed wrapping of render_async(), since Template won't have this method\n+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n+ done conditionally in Environment.__init__:\n+ https://github.com/pallets/jinja/issues/765\n+ \"\"\"\n+ global have_patched_template_render_async\n+ result = wrapped(*args, **kwargs)\n+\n+ if (\n+ wrapped_render_async is not None\n+ and not have_patched_template_render_async\n+ and \"jinja2.asyncsupport\" in sys.modules\n+ ):\n+ try:\n+ Template.render_async = wrapped_render_async(Template.render_async)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Unable to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n+ )\n+ else:\n+ have_patched_template_render_async = True\n+\n+ return result\n", "issue": "Fix Jinja2 async integration\nSince https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).\r\n\r\nWe need to update to only instrument at the point the `render_async` method becomes available.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, 
division, print_function, unicode_literals\n\nimport logging\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_template_render\n global have_patched_template_render_async\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n if not have_patched_template_render_async and wrapped_render_async is not None:\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/jinja2.py"}]} | 1,176 | 837 |
gh_patches_debug_2580 | rasdani/github-patches | git_diff | pypa__setuptools-4127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Setuptools 69.0.0 breaks Astropy's setup
### setuptools version
setuptools==69.0.0
### Python version
3.12
### OS
Ubuntu
### Additional environment information
_No response_
### Description
About 15h ago, Astropy's CI started failing to build with
```
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util'
```
This seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).
Nonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?
### Expected behavior
a deprecation warning instead of a hard error ?
### How to Reproduce
```shell
$ python -c "from setuptools.dep_util import newer_group"
```
### Output
```console
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)
```
</issue>
<code>
[start of setuptools/dep_util.py]
1 import warnings
2
3 from ._distutils import _modified
4
5
6 def __getattr__(name):
7 if name not in ['newer_pairwise_group']:
8 raise AttributeError(name)
9 warnings.warn(
10 "dep_util is Deprecated. Use functions from setuptools.modified instead.",
11 DeprecationWarning,
12 stacklevel=2,
13 )
14 return getattr(_modified, name)
15
[end of setuptools/dep_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py
--- a/setuptools/dep_util.py
+++ b/setuptools/dep_util.py
@@ -4,7 +4,7 @@
def __getattr__(name):
- if name not in ['newer_pairwise_group']:
+ if name not in ['newer_group', 'newer_pairwise_group']:
raise AttributeError(name)
warnings.warn(
"dep_util is Deprecated. Use functions from setuptools.modified instead.",
| {"golden_diff": "diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py\n--- a/setuptools/dep_util.py\n+++ b/setuptools/dep_util.py\n@@ -4,7 +4,7 @@\n \n \n def __getattr__(name):\n- if name not in ['newer_pairwise_group']:\n+ if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n", "issue": "[BUG] Setuptools 69.0.0 breaks Astropy's setup\n### setuptools version\n\nsetuptools==69.0.0\n\n### Python version\n\n3.12\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nAbout 15h ago, Astropy's CI started failing to build with\r\n```\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util'\r\n```\r\nThis seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).\r\nNonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?\n\n### Expected behavior\n\na deprecation warning instead of a hard error ?\n\n### How to Reproduce\n\n```shell\r\n$ python -c \"from setuptools.dep_util import newer_group\"\r\n```\n\n### Output\n\n```console\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)\r\n```\r\n\n", "before_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}]} | 914 | 108 |
gh_patches_debug_23752 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-3136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transition to GA: databox
Command module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.
Please check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.
</issue>
<code>
[start of src/databox/azext_databox/commands.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 # pylint: disable=line-too-long
7 # pylint: disable=too-many-lines
8 # pylint: disable=too-many-statements
9 # pylint: disable=too-many-locals
10 from azext_databox._validators import validate_create_input_parameters
11 from azure.cli.core.commands import CliCommandType
12
13
14 def load_command_table(self, _):
15
16 from azext_databox._client_factory import cf_jobs
17 databox_jobs = CliCommandType(
18 operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
19 client_factory=cf_jobs)
20 with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
21 g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
22 g.custom_command('update', 'update_databox_job')
23 g.custom_command('delete', 'delete_databox_job', confirmation=True)
24 g.custom_show_command('show', 'get_databox_job')
25 g.custom_command('list', 'list_databox_job')
26 g.custom_command('cancel', 'cancel_databox_job', confirmation=True)
27 g.custom_command('list-credentials', 'list_credentials_databox_job')
28
[end of src/databox/azext_databox/commands.py]
[start of src/databox/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.1.0'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 3',
29 'Programming Language :: Python :: 3.4',
30 'Programming Language :: Python :: 3.5',
31 'Programming Language :: Python :: 3.6',
32 'Programming Language :: Python :: 3.7',
33 'Programming Language :: Python :: 3.8',
34 'License :: OSI Approved :: MIT License',
35 ]
36
37 # TODO: Add any additional SDK dependencies here
38 DEPENDENCIES = []
39
40 with open('README.md', 'r', encoding='utf-8') as f:
41 README = f.read()
42 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
43 HISTORY = f.read()
44
45 setup(
46 name='databox',
47 version=VERSION,
48 description='Microsoft Azure Command-Line Tools DataBox Extension',
49 # TODO: Update author and email, if applicable
50 author='Microsoft Corporation',
51 author_email='[email protected]',
52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',
53 long_description=README + '\n\n' + HISTORY,
54 license='MIT',
55 classifiers=CLASSIFIERS,
56 packages=find_packages(),
57 install_requires=DEPENDENCIES,
58 package_data={'azext_databox': ['azext_metadata.json']},
59 )
60
[end of src/databox/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py
--- a/src/databox/azext_databox/commands.py
+++ b/src/databox/azext_databox/commands.py
@@ -17,7 +17,7 @@
databox_jobs = CliCommandType(
operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
client_factory=cf_jobs)
- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:
g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
g.custom_command('update', 'update_databox_job')
g.custom_command('delete', 'delete_databox_job', confirmation=True)
diff --git a/src/databox/setup.py b/src/databox/setup.py
--- a/src/databox/setup.py
+++ b/src/databox/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.0'
+VERSION = '0.1.1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
| {"golden_diff": "diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py\n--- a/src/databox/azext_databox/commands.py\n+++ b/src/databox/azext_databox/commands.py\n@@ -17,7 +17,7 @@\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\ndiff --git a/src/databox/setup.py b/src/databox/setup.py\n--- a/src/databox/setup.py\n+++ b/src/databox/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.0'\n+VERSION = '0.1.1'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: databox\nCommand module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}]} | 1,602 | 348 |
gh_patches_debug_37030 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2713 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
yum.repos.d parser is too tolerant
The yum repos d parser will accept almost anything, including binary.
</issue>
<code>
[start of insights/parsers/yum_repos_d.py]
1 from .. import Parser, parser, get_active_lines, LegacyItemAccess
2 from insights.specs import Specs
3
4
5 @parser(Specs.yum_repos_d)
6 class YumReposD(LegacyItemAccess, Parser):
7 """Class to parse the files under ``yum.repos.d`` """
8
9 def get(self, key):
10 return self.data.get(key)
11
12 def parse_content(self, content):
13 '''
14 Return an object contains a dict.
15 {
16 "rhel-source": {
17 "gpgcheck": "1",
18 "gpgkey": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
19 "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak"]
20 "enabled": "0",
21 "name": "Red Hat Enterprise Linux $releasever - $basearch - Source",
22 "baseurl": "ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/"
23 }
24 }
25 ----------------------------------------------------
26 There are several files in 'yum.repos.d' directory, which have the same
27 format. For example:
28 --------one of the files : rhel-source.repo---------
29 [rhel-source]
30 name=Red Hat Enterprise Linux $releasever - $basearch - Source
31 baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/
32 enabled=0
33 gpgcheck=1
34 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
35 file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
36 '''
37 repos_dict = {}
38 section_dict = {}
39 key = None
40 for line in get_active_lines(content):
41 if line.startswith('['):
42 section_dict = {}
43 repos_dict[line[1:-1]] = section_dict
44 elif '=' in line:
45 key, value = [s.strip() for s in line.split("=", 1)]
46 if key in ('baseurl', 'gpgkey'):
47 section_dict[key] = [v.strip() for v in value.split(",")]
48 else:
49 section_dict[key] = value
50 else:
51 if key and isinstance(section_dict[key], list):
52 section_dict[key].extend(v.strip() for v in line.split(","))
53 # Otherwise ignore line if no key or we don't store multiple values
54 self.data = repos_dict
55
56 def __iter__(self):
57 for repo in self.data:
58 yield repo
59
[end of insights/parsers/yum_repos_d.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py
--- a/insights/parsers/yum_repos_d.py
+++ b/insights/parsers/yum_repos_d.py
@@ -1,5 +1,39 @@
-from .. import Parser, parser, get_active_lines, LegacyItemAccess
+import re
+import string
+
+from .. import Parser, parser, LegacyItemAccess
from insights.specs import Specs
+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,
+ skip_none, String, WithIndent, WS)
+
+
+header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
+sep_chars = set(":=")
+key_chars = header_chars - sep_chars - set(" ")
+value_chars = set(string.printable) - set("\n\r")
+
+LeftEnd = WS >> Char("[") << WS
+RightEnd = WS >> Char("]") << WS
+Header = LeftEnd >> String(header_chars) << RightEnd
+Key = WS >> String(key_chars) << WS
+Sep = InSet(sep_chars)
+Value = WS >> HangingString(value_chars)
+KVPair = WithIndent(Key + Opt(Sep >> Value))
+Comment = WS >> (OneLineComment("#") | OneLineComment(";")).map(lambda x: None)
+
+Line = Comment | KVPair.map(tuple)
+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)
+Doc = Many(Comment | Sect).map(skip_none).map(dict)
+Top = Doc << WS << EOF
+
+
+def parse_yum_repos(content):
+ doc = Top(content)
+ for k, v in doc.items():
+ for special in ("baseurl", "gpgkey"):
+ if special in v:
+ v[special] = [i.strip() for i in re.split(",| ", v[special])]
+ return doc
@parser(Specs.yum_repos_d)
@@ -10,7 +44,7 @@
return self.data.get(key)
def parse_content(self, content):
- '''
+ """
Return an object contains a dict.
{
"rhel-source": {
@@ -33,25 +67,8 @@
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
- '''
- repos_dict = {}
- section_dict = {}
- key = None
- for line in get_active_lines(content):
- if line.startswith('['):
- section_dict = {}
- repos_dict[line[1:-1]] = section_dict
- elif '=' in line:
- key, value = [s.strip() for s in line.split("=", 1)]
- if key in ('baseurl', 'gpgkey'):
- section_dict[key] = [v.strip() for v in value.split(",")]
- else:
- section_dict[key] = value
- else:
- if key and isinstance(section_dict[key], list):
- section_dict[key].extend(v.strip() for v in line.split(","))
- # Otherwise ignore line if no key or we don't store multiple values
- self.data = repos_dict
+ """
+ self.data = parse_yum_repos("\n".join(content))
def __iter__(self):
for repo in self.data:
| {"golden_diff": "diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py\n--- a/insights/parsers/yum_repos_d.py\n+++ b/insights/parsers/yum_repos_d.py\n@@ -1,5 +1,39 @@\n-from .. import Parser, parser, get_active_lines, LegacyItemAccess\n+import re\n+import string\n+\n+from .. import Parser, parser, LegacyItemAccess\n from insights.specs import Specs\n+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n+ skip_none, String, WithIndent, WS)\n+\n+\n+header_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\n+sep_chars = set(\":=\")\n+key_chars = header_chars - sep_chars - set(\" \")\n+value_chars = set(string.printable) - set(\"\\n\\r\")\n+\n+LeftEnd = WS >> Char(\"[\") << WS\n+RightEnd = WS >> Char(\"]\") << WS\n+Header = LeftEnd >> String(header_chars) << RightEnd\n+Key = WS >> String(key_chars) << WS\n+Sep = InSet(sep_chars)\n+Value = WS >> HangingString(value_chars)\n+KVPair = WithIndent(Key + Opt(Sep >> Value))\n+Comment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n+\n+Line = Comment | KVPair.map(tuple)\n+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\n+Doc = Many(Comment | Sect).map(skip_none).map(dict)\n+Top = Doc << WS << EOF\n+\n+\n+def parse_yum_repos(content):\n+ doc = Top(content)\n+ for k, v in doc.items():\n+ for special in (\"baseurl\", \"gpgkey\"):\n+ if special in v:\n+ v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n+ return doc\n \n \n @parser(Specs.yum_repos_d)\n@@ -10,7 +44,7 @@\n return self.data.get(key)\n \n def parse_content(self, content):\n- '''\n+ \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n@@ -33,25 +67,8 @@\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n- '''\n- repos_dict = {}\n- section_dict = {}\n- key = None\n- for line in get_active_lines(content):\n- if line.startswith('['):\n- section_dict = {}\n- repos_dict[line[1:-1]] = section_dict\n- elif '=' in line:\n- key, value = [s.strip() for s in line.split(\"=\", 1)]\n- if key in ('baseurl', 'gpgkey'):\n- section_dict[key] = [v.strip() for v in value.split(\",\")]\n- else:\n- section_dict[key] = value\n- else:\n- if key and isinstance(section_dict[key], list):\n- section_dict[key].extend(v.strip() for v in line.split(\",\"))\n- # Otherwise ignore line if no key or we don't store multiple values\n- self.data = repos_dict\n+ \"\"\"\n+ self.data = parse_yum_repos(\"\\n\".join(content))\n \n def __iter__(self):\n for repo in self.data:\n", "issue": "yum.repos.d parser is too tolerant\nThe yum repos d parser will accept almost anything, including binary.\n", "before_files": [{"content": "from .. 
import Parser, parser, get_active_lines, LegacyItemAccess\nfrom insights.specs import Specs\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n '''\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n '''\n repos_dict = {}\n section_dict = {}\n key = None\n for line in get_active_lines(content):\n if line.startswith('['):\n section_dict = {}\n repos_dict[line[1:-1]] = section_dict\n elif '=' in line:\n key, value = [s.strip() for s in line.split(\"=\", 1)]\n if key in ('baseurl', 'gpgkey'):\n section_dict[key] = [v.strip() for v in value.split(\",\")]\n else:\n section_dict[key] = value\n else:\n if key and isinstance(section_dict[key], list):\n section_dict[key].extend(v.strip() for v in line.split(\",\"))\n # Otherwise ignore line if no key or we don't store multiple values\n self.data = repos_dict\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}]} | 1,232 | 802 |